summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormav <mav@FreeBSD.org>2015-10-05 08:57:16 +0000
committermav <mav@FreeBSD.org>2015-10-05 08:57:16 +0000
commit568256e886a60d82abd8dd14e4148510164ced1e (patch)
treea1a6cce50856a05f5b36bed04631d5bd16485556
parent63b0da2b15c899762f1e6cd3b08ec33ef11175c0 (diff)
downloadFreeBSD-src-568256e886a60d82abd8dd14e4148510164ced1e.zip
FreeBSD-src-568256e886a60d82abd8dd14e4148510164ced1e.tar.gz
MFC r287621: Reimplement CTL High Availability.
CTL HA functionality was originally implemented by Copan many years ago, but a large part of the sources was never published. This change includes a clean room implementation of the missing code and fixes for many bugs. This code supports dual-node HA with ALUA in four modes: - Active/Unavailable without interlink between nodes; - Active/Standby with second node handling only basic LUN discovery and reservation, synchronizing with the first node through the interlink; - Active/Active with both nodes processing commands and accessing the backing storage, synchronizing with the first node through the interlink; - Active/Active with second node working as proxy, transferring all commands to the first node for execution through the interlink. Unlike original Copan's implementation, depending on specific hardware, this code uses a simple custom TCP-based protocol for interlink. It has no authentication, so it should never be enabled on public interfaces. The code may still need some polishing, but generally it is functional. Relnotes: yes Sponsored by: iXsystems, Inc.
-rw-r--r--sys/cam/ctl/README.ctl.txt30
-rw-r--r--sys/cam/ctl/ctl.c2735
-rw-r--r--sys/cam/ctl/ctl.h26
-rw-r--r--sys/cam/ctl/ctl_backend.h6
-rw-r--r--sys/cam/ctl/ctl_backend_block.c80
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c36
-rw-r--r--sys/cam/ctl/ctl_cmd_table.c126
-rw-r--r--sys/cam/ctl/ctl_error.c24
-rw-r--r--sys/cam/ctl/ctl_error.h2
-rw-r--r--sys/cam/ctl/ctl_frontend.c35
-rw-r--r--sys/cam/ctl/ctl_frontend_cam_sim.c1
-rw-r--r--sys/cam/ctl/ctl_frontend_ioctl.c1
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.c1
-rw-r--r--sys/cam/ctl/ctl_ha.c958
-rw-r--r--sys/cam/ctl/ctl_ha.h196
-rw-r--r--sys/cam/ctl/ctl_io.h81
-rw-r--r--sys/cam/ctl/ctl_private.h22
-rw-r--r--sys/cam/ctl/ctl_tpc.c20
-rw-r--r--sys/cam/ctl/ctl_tpc_local.c8
-rw-r--r--sys/cam/ctl/scsi_ctl.c1
-rw-r--r--sys/conf/files1
-rw-r--r--sys/modules/ctl/Makefile1
22 files changed, 2399 insertions, 1992 deletions
diff --git a/sys/cam/ctl/README.ctl.txt b/sys/cam/ctl/README.ctl.txt
index b3b08f5..dd38cb5 100644
--- a/sys/cam/ctl/README.ctl.txt
+++ b/sys/cam/ctl/README.ctl.txt
@@ -43,12 +43,9 @@ Features:
- Persistent reservation support
- Mode sense/select support
- Error injection support
- - High Availability support (1)
+ - High Availability support
- All I/O handled in-kernel, no userland context switch overhead.
-(1) HA Support is just an API stub, and needs much more to be fully
- functional. See the to-do list below.
-
Configuring and Running CTL:
===========================
@@ -245,27 +242,6 @@ To Do List:
another data structure in the stack, more memory allocations, etc. This
will also require changes to the CAM CCB structure to support CTL.
- - Full-featured High Availability support. The HA API that is in ctl_ha.h
- is essentially a renamed version of Copan's HA API. There is no
- substance to it, but it remains in CTL to show what needs to be done to
- implement active/active HA from a CTL standpoint. The things that would
- need to be done include:
- - A kernel level software API for message passing as well as DMA
- between at least two nodes.
- - Hardware support and drivers for inter-node communication. This
- could be as simples as ethernet hardware and drivers.
- - A "supervisor", or startup framework to control and coordinate
- HA startup, failover (going from active/active to single mode),
- and failback (going from single mode to active/active).
- - HA support in other components of the stack. The goal behind HA
- is that one node can fail and another node can seamlessly take
- over handling I/O requests. This requires support from pretty
- much every component in the storage stack, from top to bottom.
- CTL is one piece of it, but you also need support in the RAID
- stack/filesystem/backing store. You also need full configuration
- mirroring, and all peer nodes need to be able to talk to the
- underlying storage hardware.
-
Code Roadmap:
============
@@ -365,11 +341,11 @@ This is a CTL frontend port that is also a CAM SIM. The idea is that this
frontend allows for using CTL without any target-capable hardware. So any
LUNs you create in CTL are visible via this port.
+ctl_ha.c:
ctl_ha.h:
--------
-This is a stubbed-out High Availability API. See the comments in the
-header and the description of what is needed as far as HA support above.
+This is a High Availability API and TCP-based interlink implementation.
ctl_io.h:
--------
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 9a192df..fa6963e 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
@@ -84,25 +85,6 @@ __FBSDID("$FreeBSD$");
struct ctl_softc *control_softc = NULL;
/*
- * Size and alignment macros needed for Copan-specific HA hardware. These
- * can go away when the HA code is re-written, and uses busdma for any
- * hardware.
- */
-#define CTL_ALIGN_8B(target, source, type) \
- if (((uint32_t)source & 0x7) != 0) \
- target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
- else \
- target = (type)source;
-
-#define CTL_SIZE_8B(target, size) \
- if ((size & 0x7) != 0) \
- target = size + (0x8 - (size & 0x7)); \
- else \
- target = size;
-
-#define CTL_ALIGN_8B_MARGIN 16
-
-/*
* Template mode pages.
*/
@@ -351,12 +333,6 @@ const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
}
};
-/*
- * XXX KDM move these into the softc.
- */
-static int rcv_sync_msg;
-static uint8_t ctl_pause_rtr;
-
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
@@ -375,11 +351,10 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
*/
#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10
-#ifdef notyet
static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
-#endif
+static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
@@ -395,10 +370,6 @@ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
-/**
-static void ctl_failover_change_pages(struct ctl_softc *softc,
- struct ctl_scsiio *ctsio, int master);
-**/
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
@@ -435,10 +406,11 @@ static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
-//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
-#ifdef notyet
-static void ctl_failover(void);
-#endif
+static void ctl_failover_lun(struct ctl_lun *lun);
+static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
+static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
@@ -477,9 +449,7 @@ static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
-#ifdef notyet
static void ctl_enqueue_isc(union ctl_io *io);
-#endif
static const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
@@ -487,6 +457,11 @@ static const struct ctl_cmd_entry *
static int ctl_cmd_applicable(uint8_t lun_type,
const struct ctl_cmd_entry *entry);
+static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);
+
/*
* Load the serialization table. This isn't very pretty, but is probably
* the easiest way to do it.
@@ -519,7 +494,11 @@ static moduledata_t ctl_moduledata = {
DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);
-#ifdef notyet
+static struct ctl_frontend ha_frontend =
+{
+ .name = "ha",
+};
+
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
union ctl_ha_msg *msg_info)
@@ -541,7 +520,7 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
ctsio->sense_residual = msg_info->scsi.sense_residual;
ctsio->residual = msg_info->scsi.residual;
memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
- sizeof(ctsio->sense_data));
+ msg_info->scsi.sense_len);
memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
&msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
ctl_enqueue_isc((union ctl_io *)ctsio);
@@ -560,38 +539,327 @@ ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
}
ctsio = &msg_info->hdr.serializing_sc->scsiio;
-#if 0
- /*
- * Attempt to catch the situation where an I/O has
- * been freed, and we're using it again.
- */
- if (ctsio->io_hdr.io_type == 0xff) {
- union ctl_io *tmp_io;
- tmp_io = (union ctl_io *)ctsio;
- printf("%s: %p use after free!\n", __func__,
- ctsio);
- printf("%s: type %d msg %d cdb %x iptl: "
- "%u:%u:%u tag 0x%04x "
- "flag %#x status %x\n",
- __func__,
- tmp_io->io_hdr.io_type,
- tmp_io->io_hdr.msg_type,
- tmp_io->scsiio.cdb[0],
- tmp_io->io_hdr.nexus.initid,
- tmp_io->io_hdr.nexus.targ_port,
- tmp_io->io_hdr.nexus.targ_lun,
- (tmp_io->io_hdr.io_type ==
- CTL_IO_TASK) ?
- tmp_io->taskio.tag_num :
- tmp_io->scsiio.tag_num,
- tmp_io->io_hdr.flags,
- tmp_io->io_hdr.status);
- }
-#endif
ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
ctl_enqueue_isc((union ctl_io *)ctsio);
}
+void
+ctl_isc_announce_lun(struct ctl_lun *lun)
+{
+ struct ctl_softc *softc = lun->ctl_softc;
+ union ctl_ha_msg *msg;
+ struct ctl_ha_msg_lun_pr_key pr_key;
+ int i, k;
+
+ if (softc->ha_link != CTL_HA_LINK_ONLINE)
+ return;
+ mtx_lock(&lun->lun_lock);
+ i = sizeof(msg->lun);
+ if (lun->lun_devid)
+ i += lun->lun_devid->len;
+ i += sizeof(pr_key) * lun->pr_key_count;
+alloc:
+ mtx_unlock(&lun->lun_lock);
+ msg = malloc(i, M_CTL, M_WAITOK);
+ mtx_lock(&lun->lun_lock);
+ k = sizeof(msg->lun);
+ if (lun->lun_devid)
+ k += lun->lun_devid->len;
+ k += sizeof(pr_key) * lun->pr_key_count;
+ if (i < k) {
+ free(msg, M_CTL);
+ i = k;
+ goto alloc;
+ }
+ bzero(&msg->lun, sizeof(msg->lun));
+ msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
+ msg->hdr.nexus.targ_lun = lun->lun;
+ msg->hdr.nexus.targ_mapped_lun = lun->lun;
+ msg->lun.flags = lun->flags;
+ msg->lun.pr_generation = lun->PRGeneration;
+ msg->lun.pr_res_idx = lun->pr_res_idx;
+ msg->lun.pr_res_type = lun->res_type;
+ msg->lun.pr_key_count = lun->pr_key_count;
+ i = 0;
+ if (lun->lun_devid) {
+ msg->lun.lun_devid_len = lun->lun_devid->len;
+ memcpy(&msg->lun.data[i], lun->lun_devid->data,
+ msg->lun.lun_devid_len);
+ i += msg->lun.lun_devid_len;
+ }
+ for (k = 0; k < CTL_MAX_INITIATORS; k++) {
+ if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
+ continue;
+ pr_key.pr_iid = k;
+ memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
+ i += sizeof(pr_key);
+ }
+ mtx_unlock(&lun->lun_lock);
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
+ M_WAITOK); /* msg->lun, not msg->port: this is a LUN_SYNC message */
+ free(msg, M_CTL);
+}
+
+void
+ctl_isc_announce_port(struct ctl_port *port)
+{
+ struct ctl_softc *softc = control_softc;
+ union ctl_ha_msg *msg;
+ int i;
+
+ if (port->targ_port < softc->port_min ||
+ port->targ_port >= softc->port_max ||
+ softc->ha_link != CTL_HA_LINK_ONLINE)
+ return;
+ i = sizeof(msg->port) + strlen(port->port_name) + 1;
+ if (port->lun_map)
+ i += sizeof(uint32_t) * CTL_MAX_LUNS;
+ if (port->port_devid)
+ i += port->port_devid->len;
+ if (port->target_devid)
+ i += port->target_devid->len;
+ msg = malloc(i, M_CTL, M_WAITOK);
+ bzero(&msg->port, sizeof(msg->port));
+ msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
+ msg->hdr.nexus.targ_port = port->targ_port;
+ msg->port.port_type = port->port_type;
+ msg->port.physical_port = port->physical_port;
+ msg->port.virtual_port = port->virtual_port;
+ msg->port.status = port->status;
+ i = 0;
+ msg->port.name_len = sprintf(&msg->port.data[i],
+ "%d:%s", softc->ha_id, port->port_name) + 1;
+ i += msg->port.name_len;
+ if (port->lun_map) {
+ msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
+ memcpy(&msg->port.data[i], port->lun_map,
+ msg->port.lun_map_len);
+ i += msg->port.lun_map_len;
+ }
+ if (port->port_devid) {
+ msg->port.port_devid_len = port->port_devid->len;
+ memcpy(&msg->port.data[i], port->port_devid->data,
+ msg->port.port_devid_len);
+ i += msg->port.port_devid_len;
+ }
+ if (port->target_devid) {
+ msg->port.target_devid_len = port->target_devid->len;
+ memcpy(&msg->port.data[i], port->target_devid->data,
+ msg->port.target_devid_len);
+ i += msg->port.target_devid_len;
+ }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
+ M_WAITOK);
+ free(msg, M_CTL);
+}
+
+static void
+ctl_isc_ha_link_up(struct ctl_softc *softc)
+{
+ struct ctl_port *port;
+ struct ctl_lun *lun;
+
+ STAILQ_FOREACH(port, &softc->port_list, links)
+ ctl_isc_announce_port(port);
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ ctl_isc_announce_lun(lun);
+}
+
+static void
+ctl_isc_ha_link_down(struct ctl_softc *softc)
+{
+ struct ctl_port *port;
+ struct ctl_lun *lun;
+ union ctl_io *io;
+
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+ mtx_unlock(&lun->lun_lock);
+
+ mtx_unlock(&softc->ctl_lock);
+ io = ctl_alloc_io(softc->othersc_pool);
+ mtx_lock(&softc->ctl_lock);
+ ctl_zero_io(io);
+ io->io_hdr.msg_type = CTL_MSG_FAILOVER;
+ io->io_hdr.nexus.targ_mapped_lun = lun->lun;
+ ctl_enqueue_isc(io);
+ }
+
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port >= softc->port_min &&
+ port->targ_port < softc->port_max)
+ continue;
+ port->status &= ~CTL_PORT_STATUS_ONLINE;
+ }
+ mtx_unlock(&softc->ctl_lock);
+}
+
+static void
+ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_lun *lun;
+ uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
+
+ if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
+ (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) {
+ if (msg->ua.ua_all) {
+ if (msg->ua.ua_set)
+ ctl_est_ua_all(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
+ } else {
+ if (msg->ua.ua_set)
+ ctl_est_ua(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua(lun, iid, msg->ua.ua_type);
+ }
+ }
+}
+
+static void
+ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_lun *lun;
+ struct ctl_ha_msg_lun_pr_key pr_key;
+ int i, k;
+
+ lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
+ if (lun == NULL) {
+ CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__,
+ msg->hdr.nexus.targ_lun));
+ } else {
+ mtx_lock(&lun->lun_lock);
+ i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
+ if (msg->lun.lun_devid_len != i || (i > 0 &&
+ memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
+ mtx_unlock(&lun->lun_lock);
+ printf("%s: Received conflicting HA LUN %d\n",
+ __func__, msg->hdr.nexus.targ_lun);
+ return;
+ } else {
+ /* Record whether peer is primary. */
+ if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
+ (msg->lun.flags & CTL_LUN_DISABLED) == 0)
+ lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
+ else
+ lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+
+ /* If peer is primary and we are not -- use data */
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
+ lun->PRGeneration = msg->lun.pr_generation;
+ lun->pr_res_idx = msg->lun.pr_res_idx;
+ lun->res_type = msg->lun.pr_res_type;
+ lun->pr_key_count = msg->lun.pr_key_count;
+ for (k = 0; k < CTL_MAX_INITIATORS; k++)
+ ctl_clr_prkey(lun, k);
+ for (k = 0; k < msg->lun.pr_key_count; k++) {
+ memcpy(&pr_key, &msg->lun.data[i],
+ sizeof(pr_key));
+ ctl_alloc_prkey(lun, pr_key.pr_iid);
+ ctl_set_prkey(lun, pr_key.pr_iid,
+ pr_key.pr_key);
+ i += sizeof(pr_key);
+ }
+ }
+
+ mtx_unlock(&lun->lun_lock);
+ CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
+ __func__, msg->hdr.nexus.targ_lun,
+ (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
+ "primary" : "secondary"));
+
+ /* If we are primary but peer doesn't know -- notify */
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
+ (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
+ ctl_isc_announce_lun(lun);
+ }
+ }
+}
+
+static void
+ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_port *port;
+ int i, new;
+
+ port = softc->ctl_ports[msg->hdr.nexus.targ_port];
+ if (port == NULL) {
+ CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
+ msg->hdr.nexus.targ_port));
+ new = 1;
+ port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
+ port->frontend = &ha_frontend;
+ port->targ_port = msg->hdr.nexus.targ_port;
+ } else if (port->frontend == &ha_frontend) {
+ CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
+ msg->hdr.nexus.targ_port));
+ new = 0;
+ } else {
+ printf("%s: Received conflicting HA port %d\n",
+ __func__, msg->hdr.nexus.targ_port);
+ return;
+ }
+ port->port_type = msg->port.port_type;
+ port->physical_port = msg->port.physical_port;
+ port->virtual_port = msg->port.virtual_port;
+ port->status = msg->port.status;
+ i = 0;
+ free(port->port_name, M_CTL);
+ port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
+ M_CTL);
+ i += msg->port.name_len;
+ if (msg->port.lun_map_len != 0) {
+ if (port->lun_map == NULL)
+ port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ M_CTL, M_WAITOK);
+ memcpy(port->lun_map, &msg->port.data[i],
+ sizeof(uint32_t) * CTL_MAX_LUNS);
+ i += msg->port.lun_map_len;
+ } else {
+ free(port->lun_map, M_CTL);
+ port->lun_map = NULL;
+ }
+ if (msg->port.port_devid_len != 0) {
+ if (port->port_devid == NULL ||
+ port->port_devid->len != msg->port.port_devid_len) {
+ free(port->port_devid, M_CTL);
+ port->port_devid = malloc(sizeof(struct ctl_devid) +
+ msg->port.port_devid_len, M_CTL, M_WAITOK);
+ }
+ memcpy(port->port_devid->data, &msg->port.data[i],
+ msg->port.port_devid_len);
+ port->port_devid->len = msg->port.port_devid_len;
+ i += msg->port.port_devid_len;
+ } else {
+ free(port->port_devid, M_CTL);
+ port->port_devid = NULL;
+ }
+ if (msg->port.target_devid_len != 0) {
+ if (port->target_devid == NULL ||
+ port->target_devid->len != msg->port.target_devid_len) {
+ free(port->target_devid, M_CTL);
+ port->target_devid = malloc(sizeof(struct ctl_devid) +
+ msg->port.target_devid_len, M_CTL, M_WAITOK);
+ }
+ memcpy(port->target_devid->data, &msg->port.data[i],
+ msg->port.target_devid_len);
+ port->target_devid->len = msg->port.target_devid_len;
+ i += msg->port.target_devid_len;
+ } else {
+ free(port->target_devid, M_CTL); /* target_devid, not port_devid */
+ port->target_devid = NULL;
+ }
+ if (new) {
+ if (ctl_port_register(port) != 0) {
+ printf("%s: ctl_port_register() failed with error\n",
+ __func__);
+ }
+ }
+}
+
/*
* ISC (Inter Shelf Communication) event handler. Events from the HA
* subsystem come in here.
@@ -605,54 +873,33 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
ctl_ha_status isc_status;
softc = control_softc;
- io = NULL;
-
-
-#if 0
- printf("CTL: Isc Msg event %d\n", event);
-#endif
+ CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
if (event == CTL_HA_EVT_MSG_RECV) {
- union ctl_ha_msg msg_info;
+ union ctl_ha_msg *msg, msgbuf;
- isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), /*wait*/ 0);
-#if 0
- printf("CTL: msg_type %d\n", msg_info.msg_type);
-#endif
- if (isc_status != 0) {
- printf("Error receiving message, status = %d\n",
- isc_status);
+ if (param > sizeof(msgbuf))
+ msg = malloc(param, M_CTL, M_WAITOK);
+ else
+ msg = &msgbuf;
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
+ M_WAITOK);
+ if (isc_status != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: Error receiving message: %d\n",
+ __func__, isc_status);
+ if (msg != &msgbuf)
+ free(msg, M_CTL);
return;
}
- switch (msg_info.hdr.msg_type) {
+ CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
+ switch (msg->hdr.msg_type) {
case CTL_MSG_SERIALIZE:
-#if 0
- printf("Serialize\n");
-#endif
- io = ctl_alloc_io_nowait(softc->othersc_pool);
- if (io == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* Need to set busy and send msg back */
- msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
- msg_info.hdr.status = CTL_SCSI_ERROR;
- msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
- msg_info.scsi.sense_len = 0;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
- }
- goto bailout;
- }
+ io = ctl_alloc_io(softc->othersc_pool);
ctl_zero_io(io);
- // populate ctsio from msg_info
+ // populate ctsio from msg
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
- io->io_hdr.original_sc = msg_info.hdr.original_sc;
-#if 0
- printf("pOrig %x\n", (int)msg_info.original_sc);
-#endif
+ io->io_hdr.original_sc = msg->hdr.original_sc;
io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
CTL_FLAG_IO_ACTIVE;
/*
@@ -662,18 +909,23 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
*
* XXX KDM add another flag that is more specific.
*/
- if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
+ if (softc->ha_mode != CTL_HA_MODE_XFER)
io->io_hdr.flags |= CTL_FLAG_INT_COPY;
- io->io_hdr.nexus = msg_info.hdr.nexus;
+ io->io_hdr.nexus = msg->hdr.nexus;
#if 0
printf("port %u, iid %u, lun %u\n",
io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.initid,
io->io_hdr.nexus.targ_lun);
#endif
- io->scsiio.tag_num = msg_info.scsi.tag_num;
- io->scsiio.tag_type = msg_info.scsi.tag_type;
- memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
+ io->scsiio.tag_num = msg->scsi.tag_num;
+ io->scsiio.tag_type = msg->scsi.tag_type;
+#ifdef CTL_TIME_IO
+ io->io_hdr.start_time = time_uptime;
+ getbintime(&io->io_hdr.start_bt);
+#endif /* CTL_TIME_IO */
+ io->scsiio.cdb_len = msg->scsi.cdb_len;
+ memcpy(io->scsiio.cdb, msg->scsi.cdb,
CTL_MAX_CDBLEN);
if (softc->ha_mode == CTL_HA_MODE_XFER) {
const struct ctl_cmd_entry *entry;
@@ -691,7 +943,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
struct ctl_sg_entry *sgl;
int i, j;
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
printf("%s: original_sc == NULL!\n", __func__);
/* XXX KDM do something here */
@@ -703,97 +955,66 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
* Keep track of this, we need to send it back over
* when the datamove is complete.
*/
- io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
-
- if (msg_info.dt.sg_sequence == 0) {
- /*
- * XXX KDM we use the preallocated S/G list
- * here, but we'll need to change this to
- * dynamic allocation if we need larger S/G
- * lists.
- */
- if (msg_info.dt.kern_sg_entries >
- sizeof(io->io_hdr.remote_sglist) /
- sizeof(io->io_hdr.remote_sglist[0])) {
- printf("%s: number of S/G entries "
- "needed %u > allocated num %zd\n",
- __func__,
- msg_info.dt.kern_sg_entries,
- sizeof(io->io_hdr.remote_sglist)/
- sizeof(io->io_hdr.remote_sglist[0]));
-
- /*
- * XXX KDM send a message back to
- * the other side to shut down the
- * DMA. The error will come back
- * through via the normal channel.
- */
- break;
- }
- sgl = io->io_hdr.remote_sglist;
- memset(sgl, 0,
- sizeof(io->io_hdr.remote_sglist));
+ io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
+
+ if (msg->dt.sg_sequence == 0) {
+ i = msg->dt.kern_sg_entries +
+ io->scsiio.kern_data_len /
+ CTL_HA_DATAMOVE_SEGMENT + 1;
+ sgl = malloc(sizeof(*sgl) * i, M_CTL,
+ M_WAITOK | M_ZERO);
+ io->io_hdr.remote_sglist = sgl;
+ io->io_hdr.local_sglist =
+ &sgl[msg->dt.kern_sg_entries];
io->scsiio.kern_data_ptr = (uint8_t *)sgl;
io->scsiio.kern_sg_entries =
- msg_info.dt.kern_sg_entries;
+ msg->dt.kern_sg_entries;
io->scsiio.rem_sg_entries =
- msg_info.dt.kern_sg_entries;
+ msg->dt.kern_sg_entries;
io->scsiio.kern_data_len =
- msg_info.dt.kern_data_len;
+ msg->dt.kern_data_len;
io->scsiio.kern_total_len =
- msg_info.dt.kern_total_len;
+ msg->dt.kern_total_len;
io->scsiio.kern_data_resid =
- msg_info.dt.kern_data_resid;
+ msg->dt.kern_data_resid;
io->scsiio.kern_rel_offset =
- msg_info.dt.kern_rel_offset;
- /*
- * Clear out per-DMA flags.
- */
- io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
- /*
- * Add per-DMA flags that are set for this
- * particular DMA request.
- */
- io->io_hdr.flags |= msg_info.dt.flags &
- CTL_FLAG_RDMA_MASK;
+ msg->dt.kern_rel_offset;
+ io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
+ io->io_hdr.flags |= msg->dt.flags &
+ CTL_FLAG_BUS_ADDR;
} else
sgl = (struct ctl_sg_entry *)
io->scsiio.kern_data_ptr;
- for (i = msg_info.dt.sent_sg_entries, j = 0;
- i < (msg_info.dt.sent_sg_entries +
- msg_info.dt.cur_sg_entries); i++, j++) {
- sgl[i].addr = msg_info.dt.sg_list[j].addr;
- sgl[i].len = msg_info.dt.sg_list[j].len;
+ for (i = msg->dt.sent_sg_entries, j = 0;
+ i < (msg->dt.sent_sg_entries +
+ msg->dt.cur_sg_entries); i++, j++) {
+ sgl[i].addr = msg->dt.sg_list[j].addr;
+ sgl[i].len = msg->dt.sg_list[j].len;
#if 0
printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
__func__,
- msg_info.dt.sg_list[j].addr,
- msg_info.dt.sg_list[j].len,
+ msg->dt.sg_list[j].addr,
+ msg->dt.sg_list[j].len,
sgl[i].addr, sgl[i].len, j, i);
#endif
}
-#if 0
- memcpy(&sgl[msg_info.dt.sent_sg_entries],
- msg_info.dt.sg_list,
- sizeof(*sgl) * msg_info.dt.cur_sg_entries);
-#endif
/*
* If this is the last piece of the I/O, we've got
* the full S/G list. Queue processing in the thread.
* Otherwise wait for the next piece.
*/
- if (msg_info.dt.sg_last != 0)
+ if (msg->dt.sg_last != 0)
ctl_enqueue_isc(io);
break;
}
/* Performed on the Serializing (primary) SC, XFER mode only */
case CTL_MSG_DATAMOVE_DONE: {
- if (msg_info.hdr.serializing_sc == NULL) {
+ if (msg->hdr.serializing_sc == NULL) {
printf("%s: serializing_sc == NULL!\n",
__func__);
/* XXX KDM now what? */
@@ -804,33 +1025,35 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
* there was a failure, so we can return status
* back to the initiator.
*/
- io = msg_info.hdr.serializing_sc;
+ io = msg->hdr.serializing_sc;
io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
- io->io_hdr.status = msg_info.hdr.status;
- io->scsiio.scsi_status = msg_info.scsi.scsi_status;
- io->scsiio.sense_len = msg_info.scsi.sense_len;
- io->scsiio.sense_residual =msg_info.scsi.sense_residual;
- io->io_hdr.port_status = msg_info.scsi.fetd_status;
- io->scsiio.residual = msg_info.scsi.residual;
- memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+ io->io_hdr.port_status = msg->scsi.fetd_status;
+ io->scsiio.residual = msg->scsi.residual;
+ if (msg->hdr.status != CTL_STATUS_NONE) {
+ io->io_hdr.status = msg->hdr.status;
+ io->scsiio.scsi_status = msg->scsi.scsi_status;
+ io->scsiio.sense_len = msg->scsi.sense_len;
+ io->scsiio.sense_residual =msg->scsi.sense_residual;
+ memcpy(&io->scsiio.sense_data,
+ &msg->scsi.sense_data,
+ msg->scsi.sense_len);
+ }
ctl_enqueue_isc(io);
break;
}
/* Preformed on Originating SC, SER_ONLY mode */
case CTL_MSG_R2R:
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
- printf("%s: Major Bummer\n", __func__);
- return;
- } else {
-#if 0
- printf("pOrig %x\n",(int) ctsio);
-#endif
+ printf("%s: original_sc == NULL!\n",
+ __func__);
+ break;
}
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
io->io_hdr.msg_type = CTL_MSG_R2R;
- io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
+ io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
ctl_enqueue_isc(io);
break;
@@ -842,22 +1065,20 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
*/
case CTL_MSG_FINISH_IO:
if (softc->ha_mode == CTL_HA_MODE_XFER)
- ctl_isc_handler_finish_xfer(softc,
- &msg_info);
+ ctl_isc_handler_finish_xfer(softc, msg);
else
- ctl_isc_handler_finish_ser_only(softc,
- &msg_info);
+ ctl_isc_handler_finish_ser_only(softc, msg);
break;
/* Preformed on Originating SC */
case CTL_MSG_BAD_JUJU:
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
printf("%s: Bad JUJU!, original_sc is NULL!\n",
__func__);
break;
}
- ctl_copy_sense_data(&msg_info, io);
+ ctl_copy_sense_data(msg, io);
/*
* IO should have already been cleaned up on other
* SC so clear this flag so we won't send a message
@@ -866,7 +1087,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
- /* io = msg_info.hdr.serializing_sc; */
+ /* io = msg->hdr.serializing_sc; */
io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
ctl_enqueue_isc(io);
break;
@@ -874,91 +1095,99 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* Handle resets sent from the other side */
case CTL_MSG_MANAGE_TASKS: {
struct ctl_taskio *taskio;
- taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
+ taskio = (struct ctl_taskio *)ctl_alloc_io(
softc->othersc_pool);
- if (taskio == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* should I just call the proper reset func
- here??? */
- goto bailout;
- }
ctl_zero_io((union ctl_io *)taskio);
taskio->io_hdr.io_type = CTL_IO_TASK;
taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
- taskio->io_hdr.nexus = msg_info.hdr.nexus;
- taskio->task_action = msg_info.task.task_action;
- taskio->tag_num = msg_info.task.tag_num;
- taskio->tag_type = msg_info.task.tag_type;
+ taskio->io_hdr.nexus = msg->hdr.nexus;
+ taskio->task_action = msg->task.task_action;
+ taskio->tag_num = msg->task.tag_num;
+ taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
taskio->io_hdr.start_time = time_uptime;
getbintime(&taskio->io_hdr.start_bt);
-#if 0
- cs_prof_gettime(&taskio->io_hdr.start_ticks);
-#endif
#endif /* CTL_TIME_IO */
ctl_run_task((union ctl_io *)taskio);
break;
}
/* Persistent Reserve action which needs attention */
case CTL_MSG_PERS_ACTION:
- presio = (struct ctl_prio *)ctl_alloc_io_nowait(
+ presio = (struct ctl_prio *)ctl_alloc_io(
softc->othersc_pool);
- if (presio == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* Need to set busy and send msg back */
- goto bailout;
- }
ctl_zero_io((union ctl_io *)presio);
presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
- presio->pr_msg = msg_info.pr;
+ presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
+ presio->io_hdr.nexus = msg->hdr.nexus;
+ presio->pr_msg = msg->pr;
ctl_enqueue_isc((union ctl_io *)presio);
break;
- case CTL_MSG_SYNC_FE:
- rcv_sync_msg = 1;
+ case CTL_MSG_UA:
+ ctl_isc_ua(softc, msg, param);
+ break;
+ case CTL_MSG_PORT_SYNC:
+ ctl_isc_port_sync(softc, msg, param);
+ break;
+ case CTL_MSG_LUN_SYNC:
+ ctl_isc_lun_sync(softc, msg, param);
break;
default:
- printf("How did I get here?\n");
+ printf("Received HA message of unknown type %d\n",
+ msg->hdr.msg_type);
+ break;
}
- } else if (event == CTL_HA_EVT_MSG_SENT) {
- if (param != CTL_HA_STATUS_SUCCESS) {
- printf("Bad status from ctl_ha_msg_send status %d\n",
- param);
+ if (msg != &msgbuf)
+ free(msg, M_CTL);
+ } else if (event == CTL_HA_EVT_LINK_CHANGE) {
+ printf("CTL: HA link status changed from %d to %d\n",
+ softc->ha_link, param);
+ if (param == softc->ha_link)
+ return;
+ if (softc->ha_link == CTL_HA_LINK_ONLINE) {
+ softc->ha_link = param;
+ ctl_isc_ha_link_down(softc);
+ } else {
+ softc->ha_link = param;
+ if (softc->ha_link == CTL_HA_LINK_ONLINE)
+ ctl_isc_ha_link_up(softc);
}
return;
- } else if (event == CTL_HA_EVT_DISCONNECT) {
- printf("CTL: Got a disconnect from Isc\n");
- return;
} else {
printf("ctl_isc_event_handler: Unknown event %d\n", event);
return;
}
-
-bailout:
- return;
}
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
- struct scsi_sense_data *sense;
- sense = &dest->scsiio.sense_data;
- bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
+ memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
+ src->scsi.sense_len);
dest->scsiio.scsi_status = src->scsi.scsi_status;
dest->scsiio.sense_len = src->scsi.sense_len;
dest->io_hdr.status = src->hdr.status;
}
-#endif
+
+static void
+ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
+{
+
+ memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
+ src->scsiio.sense_len);
+ dest->scsi.scsi_status = src->scsiio.scsi_status;
+ dest->scsi.sense_len = src->scsiio.sense_len;
+ dest->hdr.status = src->io_hdr.status;
+}
static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
ctl_ua_type *pu;
+ if (initidx < softc->init_min || initidx >= softc->init_max)
+ return;
mtx_assert(&lun->lun_lock, MA_OWNED);
pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
if (pu == NULL)
@@ -969,10 +1198,11 @@ ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
int i, j;
mtx_assert(&lun->lun_lock, MA_OWNED);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
+ for (i = softc->port_min; i < softc->port_max; i++) {
if (lun->pending_ua[i] == NULL)
continue;
for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
@@ -986,8 +1216,11 @@ ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
static void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
ctl_ua_type *pu;
+ if (initidx < softc->init_min || initidx >= softc->init_max)
+ return;
mtx_assert(&lun->lun_lock, MA_OWNED);
pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
if (pu == NULL)
@@ -998,10 +1231,11 @@ ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
static void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
int i, j;
mtx_assert(&lun->lun_lock, MA_OWNED);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
+ for (i = softc->port_min; i < softc->port_max; i++) {
if (lun->pending_ua[i] == NULL)
continue;
for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
@@ -1027,17 +1261,14 @@ ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
}
static int
-ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
+ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
struct ctl_softc *softc = (struct ctl_softc *)arg1;
struct ctl_lun *lun;
+ struct ctl_lun_req ireq;
int error, value;
- if (softc->flags & CTL_FLAG_ACTIVE_SHELF)
- value = 0;
- else
- value = 1;
-
+ value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
error = sysctl_handle_int(oidp, &value, 0, req);
if ((error != 0) || (req->newptr == NULL))
return (error);
@@ -1048,9 +1279,17 @@ ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
else
softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- mtx_lock(&lun->lun_lock);
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- mtx_unlock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ bzero(&ireq, sizeof(ireq));
+ ireq.reqtype = CTL_LUNREQ_MODIFY;
+ ireq.reqdata.modify.lun_id = lun->lun;
+ lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
+ curthread);
+ if (ireq.status != CTL_LUN_OK) {
+ printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
+ __func__, ireq.status, ireq.error_str);
+ }
+ mtx_lock(&softc->ctl_lock);
}
mtx_unlock(&softc->ctl_lock);
return (0);
@@ -1062,12 +1301,8 @@ ctl_init(void)
struct ctl_softc *softc;
void *other_pool;
int i, error, retval;
- //int isc_retval;
retval = 0;
- ctl_pause_rtr = 0;
- rcv_sync_msg = 0;
-
control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
M_WAITOK | M_ZERO);
softc = control_softc;
@@ -1077,14 +1312,6 @@ ctl_init(void)
softc->dev->si_drv1 = softc;
- /*
- * By default, return a "bad LUN" peripheral qualifier for unknown
- * LUNs. The user can override this default using the tunable or
- * sysctl. See the comment in ctl_inquiry_std() for more details.
- */
- softc->inquiry_pq_no_lun = 1;
- TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
- &softc->inquiry_pq_no_lun);
sysctl_ctx_init(&softc->sysctl_ctx);
softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
@@ -1098,12 +1325,6 @@ ctl_init(void)
return (ENOMEM);
}
- SYSCTL_ADD_INT(&softc->sysctl_ctx,
- SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
- "inquiry_pq_no_lun", CTLFLAG_RW,
- &softc->inquiry_pq_no_lun, 0,
- "Report no lun possible for invalid LUNs");
-
mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
@@ -1115,6 +1336,10 @@ ctl_init(void)
*/
softc->flags = CTL_FLAG_REAL_SYNC;
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
+ "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
+
/*
* In Copan's HA scheme, the "master" and "slave" roles are
* figured out through the slot the controller is in. Although it
@@ -1123,13 +1348,22 @@ ctl_init(void)
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
"HA head ID (0 - no HA)");
- if (softc->ha_id == 0) {
+ if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
softc->flags |= CTL_FLAG_ACTIVE_SHELF;
softc->is_single = 1;
- softc->port_offset = 0;
- } else
- softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
- softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;
+ softc->port_cnt = CTL_MAX_PORTS;
+ softc->port_min = 0;
+ } else {
+ softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
+ softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
+ }
+ softc->port_max = softc->port_min + softc->port_cnt;
+ softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
+ softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
+
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
+ "HA link state (0 - offline, 1 - unknown, 2 - online)");
STAILQ_INIT(&softc->lun_list);
STAILQ_INIT(&softc->pending_lun_queue);
@@ -1186,8 +1420,21 @@ ctl_init(void)
}
SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
- OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
- softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");
+ OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
+ softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
+
+ if (softc->is_single == 0) {
+ ctl_frontend_register(&ha_frontend);
+ if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_init: ctl_ha_msg_init failed.\n");
+ softc->is_single = 1;
+ } else
+ if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_init: ctl_ha_msg_register failed.\n");
+ softc->is_single = 1;
+ }
+ }
return (0);
}
@@ -1199,6 +1446,17 @@ ctl_shutdown(void)
softc = (struct ctl_softc *)control_softc;
+ if (softc->is_single == 0) {
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n");
+ }
+ if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n");
+ }
+ ctl_frontend_deregister(&ha_frontend);
+ }
+
mtx_lock(&softc->ctl_lock);
/*
@@ -1258,130 +1516,6 @@ ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
return (0);
}
-int
-ctl_port_enable(ctl_port_type port_type)
-{
- struct ctl_softc *softc = control_softc;
- struct ctl_port *port;
-
- if (softc->is_single == 0) {
- union ctl_ha_msg msg_info;
- int isc_retval;
-
-#if 0
- printf("%s: HA mode, synchronizing frontend enable\n",
- __func__);
-#endif
- msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
- printf("Sync msg send error retval %d\n", isc_retval);
- }
- if (!rcv_sync_msg) {
- isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 1);
- }
-#if 0
- printf("CTL:Frontend Enable\n");
- } else {
- printf("%s: single mode, skipping frontend synchronization\n",
- __func__);
-#endif
- }
-
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if (port_type & port->port_type)
- {
-#if 0
- printf("port %d\n", port->targ_port);
-#endif
- ctl_port_online(port);
- }
- }
-
- return (0);
-}
-
-int
-ctl_port_disable(ctl_port_type port_type)
-{
- struct ctl_softc *softc;
- struct ctl_port *port;
-
- softc = control_softc;
-
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if (port_type & port->port_type)
- ctl_port_offline(port);
- }
-
- return (0);
-}
-
-/*
- * Returns 0 for success, 1 for failure.
- * Currently the only failure mode is if there aren't enough entries
- * allocated. So, in case of a failure, look at num_entries_dropped,
- * reallocate and try again.
- */
-int
-ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
- int *num_entries_filled, int *num_entries_dropped,
- ctl_port_type port_type, int no_virtual)
-{
- struct ctl_softc *softc;
- struct ctl_port *port;
- int entries_dropped, entries_filled;
- int retval;
- int i;
-
- softc = control_softc;
-
- retval = 0;
- entries_filled = 0;
- entries_dropped = 0;
-
- i = 0;
- mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(port, &softc->port_list, links) {
- struct ctl_port_entry *entry;
-
- if ((port->port_type & port_type) == 0)
- continue;
-
- if ((no_virtual != 0)
- && (port->virtual_port != 0))
- continue;
-
- if (entries_filled >= num_entries_alloced) {
- entries_dropped++;
- continue;
- }
- entry = &entries[i];
-
- entry->port_type = port->port_type;
- strlcpy(entry->port_name, port->port_name,
- sizeof(entry->port_name));
- entry->physical_port = port->physical_port;
- entry->virtual_port = port->virtual_port;
- entry->wwnn = port->wwnn;
- entry->wwpn = port->wwpn;
-
- i++;
- entries_filled++;
- }
-
- mtx_unlock(&softc->ctl_lock);
-
- if (entries_dropped > 0)
- retval = 1;
-
- *num_entries_dropped = entries_dropped;
- *num_entries_filled = entries_filled;
-
- return (retval);
-}
-
/*
* Remove an initiator by port number and initiator ID.
* Returns 0 for success, -1 for failure.
@@ -1591,46 +1725,68 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
struct ctl_softc *softc;
union ctl_ha_msg msg_info;
struct ctl_lun *lun;
+ const struct ctl_cmd_entry *entry;
int retval = 0;
uint32_t targ_lun;
softc = control_softc;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- lun = softc->ctl_luns[targ_lun];
- if (lun==NULL)
- {
+ if ((targ_lun < CTL_MAX_LUNS) &&
+ ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
+ /*
+ * If the LUN is invalid, pretend that it doesn't exist.
+ * It will go away as soon as all pending I/O has been
+ * completed.
+ */
+ mtx_lock(&lun->lun_lock);
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ lun = NULL;
+ }
+ } else
+ lun = NULL;
+ if (lun == NULL) {
/*
* Why isn't LUN defined? The other side wouldn't
* send a cmd if the LUN is undefined.
*/
printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);
- /* "Logical unit not supported" */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x25,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ ctl_set_unsupported_lun(ctsio);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
return(1);
+ }
+ entry = ctl_get_cmd_entry(ctsio, NULL);
+ if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
+ return(1);
}
- mtx_lock(&lun->lun_lock);
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
+ ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
+
+ /*
+ * Every I/O goes into the OOA queue for a
+ * particular LUN, and stays there until completion.
+ */
+#ifdef CTL_TIME_IO
+ if (TAILQ_EMPTY(&lun->ooa_queue))
+ lun->idle_time += getsbinuptime() - lun->last_busy;
+#endif
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
@@ -1639,107 +1795,67 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
blocked_links);
+ mtx_unlock(&lun->lun_lock);
break;
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
if (softc->ha_mode == CTL_HA_MODE_XFER) {
ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
ctl_enqueue_rtr((union ctl_io *)ctsio);
+ mtx_unlock(&lun->lun_lock);
} else {
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ mtx_unlock(&lun->lun_lock);
/* send msg back to other side */
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
msg_info.hdr.msg_type = CTL_MSG_R2R;
-#if 0
- printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
-#endif
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.hdr), M_WAITOK);
}
break;
case CTL_ACTION_OVERLAP:
- /* OVERLAPPED COMMANDS ATTEMPTED */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x4E,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+
+ ctl_set_overlapped_cmd(ctsio);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer Overlap\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
case CTL_ACTION_OVERLAP_TAG:
- /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x4D,
- /*ascq*/ ctsio->tag_num & 0xff,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+ ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer Overlap Tag\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
case CTL_ACTION_ERROR:
default:
- /* "Internal target failure" */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
- /*asc*/ 0x44,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+
+ ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer HW Error\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
}
- mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -2000,9 +2116,12 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
STAILQ_FOREACH(port, &softc->port_list, links) {
int action, done;
+ if (port->targ_port < softc->port_min ||
+ port->targ_port >= softc->port_max)
+ continue;
+
action = 0;
done = 0;
-
if ((entry->port_type == CTL_PORT_NONE)
&& (entry->targ_port == port->targ_port)) {
/*
@@ -2032,30 +2151,29 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
}
- if (action != 0) {
- /*
- * XXX KDM we have to drop the lock here,
- * because the online/offline operations
- * can potentially block. We need to
- * reference count the frontends so they
- * can't go away,
- */
- mtx_unlock(&softc->ctl_lock);
-
- if (cmd == CTL_ENABLE_PORT) {
- ctl_port_online(port);
- } else if (cmd == CTL_DISABLE_PORT) {
- ctl_port_offline(port);
- }
+ if (action == 0)
+ continue;
+ /*
+ * XXX KDM we have to drop the lock here, because
+ * the online/offline operations can potentially
+ * block. We need to reference count the frontends
+ * so they can't go away,
+ */
+ if (cmd == CTL_ENABLE_PORT) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_port_online(port);
mtx_lock(&softc->ctl_lock);
-
- if (cmd == CTL_SET_PORT_WWNS)
- ctl_port_set_wwns(port,
- (entry->flags & CTL_PORT_WWNN_VALID) ?
- 1 : 0, entry->wwnn,
- (entry->flags & CTL_PORT_WWPN_VALID) ?
- 1 : 0, entry->wwpn);
+ } else if (cmd == CTL_DISABLE_PORT) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_port_offline(port);
+ mtx_lock(&softc->ctl_lock);
+ } else if (cmd == CTL_SET_PORT_WWNS) {
+ ctl_port_set_wwns(port,
+ (entry->flags & CTL_PORT_WWNN_VALID) ?
+ 1 : 0, entry->wwnn,
+ (entry->flags & CTL_PORT_WWPN_VALID) ?
+ 1 : 0, entry->wwpn);
}
if (done != 0)
break;
@@ -2553,7 +2671,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
|| ((lun->flags & CTL_LUN_DISABLED) != 0))
continue;
- for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
+ for (j = 0; j < CTL_MAX_PORTS; j++) {
if (lun->pr_keys[j] == NULL)
continue;
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
@@ -2845,7 +2963,10 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
}
- retval = fe->ioctl(dev, cmd, addr, flag, td);
+ if (fe->ioctl)
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+ else
+ retval = ENODEV;
if (req->num_args > 0) {
ctl_copyout_args(req->num_args, req->kern_args);
@@ -3004,7 +3125,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct ctl_port *port;
mtx_lock(&softc->ctl_lock);
- if (lm->port >= CTL_MAX_PORTS ||
+ if (lm->port < softc->port_min ||
+ lm->port >= softc->port_max ||
(port = softc->ctl_ports[lm->port]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return (ENXIO);
@@ -3067,30 +3189,9 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
{
- if (nexus->targ_port < CTL_MAX_PORTS)
- return (nexus->initid +
- (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
- else
- return (nexus->initid +
- ((nexus->targ_port - CTL_MAX_PORTS) *
- CTL_MAX_INIT_PER_PORT));
-}
-
-uint32_t
-ctl_get_resindex(struct ctl_nexus *nexus)
-{
return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}
-uint32_t
-ctl_port_idx(int port_num)
-{
- if (port_num < CTL_MAX_PORTS)
- return(port_num);
- else
- return(port_num - CTL_MAX_PORTS);
-}
-
int
ctl_lun_map_init(struct ctl_port *port)
{
@@ -3105,10 +3206,12 @@ ctl_lun_map_init(struct ctl_port *port)
return (ENOMEM);
for (i = 0; i < CTL_MAX_LUNS; i++)
port->lun_map[i] = UINT32_MAX;
- if (port->status & CTL_PORT_STATUS_ONLINE &&
- port->lun_disable != NULL) {
- STAILQ_FOREACH(lun, &softc->lun_list, links)
- port->lun_disable(port->targ_lun_arg, lun->lun);
+ if (port->status & CTL_PORT_STATUS_ONLINE) {
+ if (port->lun_disable != NULL) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ port->lun_disable(port->targ_lun_arg, lun->lun);
+ }
+ ctl_isc_announce_port(port);
}
return (0);
}
@@ -3123,10 +3226,12 @@ ctl_lun_map_deinit(struct ctl_port *port)
return (0);
free(port->lun_map, M_CTL);
port->lun_map = NULL;
- if (port->status & CTL_PORT_STATUS_ONLINE &&
- port->lun_enable != NULL) {
- STAILQ_FOREACH(lun, &softc->lun_list, links)
- port->lun_enable(port->targ_lun_arg, lun->lun);
+ if (port->status & CTL_PORT_STATUS_ONLINE) {
+ if (port->lun_enable != NULL) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ port->lun_enable(port->targ_lun_arg, lun->lun);
+ }
+ ctl_isc_announce_port(port);
}
return (0);
}
@@ -3144,9 +3249,11 @@ ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
}
old = port->lun_map[plun];
port->lun_map[plun] = glun;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS &&
- port->lun_enable != NULL)
- port->lun_enable(port->targ_lun_arg, plun);
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
+ if (port->lun_enable != NULL)
+ port->lun_enable(port->targ_lun_arg, plun);
+ ctl_isc_announce_port(port);
+ }
return (0);
}
@@ -3159,9 +3266,11 @@ ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
return (0);
old = port->lun_map[plun];
port->lun_map[plun] = UINT32_MAX;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS &&
- port->lun_disable != NULL)
- port->lun_disable(port->targ_lun_arg, plun);
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
+ if (port->lun_disable != NULL)
+ port->lun_disable(port->targ_lun_arg, plun);
+ ctl_isc_announce_port(port);
+ }
return (0);
}
@@ -3195,34 +3304,19 @@ ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
static struct ctl_port *
ctl_io_port(struct ctl_io_hdr *io_hdr)
{
- int port_num;
- port_num = io_hdr->nexus.targ_port;
- return (control_softc->ctl_ports[ctl_port_idx(port_num)]);
+ return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
}
-/*
- * Note: This only works for bitmask sizes that are at least 32 bits, and
- * that are a power of 2.
- */
int
-ctl_ffz(uint32_t *mask, uint32_t size)
+ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
- uint32_t num_chunks, num_pieces;
- int i, j;
-
- num_chunks = (size >> 5);
- if (num_chunks == 0)
- num_chunks++;
- num_pieces = MIN((sizeof(uint32_t) * 8), size);
+ int i;
- for (i = 0; i < num_chunks; i++) {
- for (j = 0; j < num_pieces; j++) {
- if ((mask[i] & (1 << j)) == 0)
- return ((i << 5) + j);
- }
+ for (i = first; i < last; i++) {
+ if ((mask[i / 32] & (1 << (i % 32))) == 0)
+ return (i);
}
-
return (-1);
}
@@ -4121,7 +4215,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
}
lun_number = be_lun->req_lun_id;
} else {
- lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
+ lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
if (lun_number == -1) {
mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl: can't allocate LUN, out of LUNs\n");
@@ -4244,7 +4338,7 @@ ctl_free_lun(struct ctl_lun *lun)
free(lun->lun_devid, M_CTL);
for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pending_ua[i], M_CTL);
- for (i = 0; i < 2 * CTL_MAX_PORTS; i++)
+ for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pr_keys[i], M_CTL);
free(lun->write_buffer, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
@@ -4333,6 +4427,7 @@ ctl_enable_lun(struct ctl_be_lun *be_lun)
}
mtx_unlock(&softc->ctl_lock);
+ ctl_isc_announce_lun(lun);
return (0);
}
@@ -4382,6 +4477,7 @@ ctl_disable_lun(struct ctl_be_lun *be_lun)
}
mtx_unlock(&softc->ctl_lock);
+ ctl_isc_announce_lun(lun);
return (0);
}
@@ -4431,6 +4527,32 @@ ctl_lun_online(struct ctl_be_lun *be_lun)
}
int
+ctl_lun_primary(struct ctl_be_lun *be_lun)
+{
+ struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&lun->lun_lock);
+ lun->flags |= CTL_LUN_PRIMARY_SC;
+ mtx_unlock(&lun->lun_lock);
+ ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+ ctl_isc_announce_lun(lun);
+ return (0);
+}
+
+int
+ctl_lun_secondary(struct ctl_be_lun *be_lun)
+{
+ struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&lun->lun_lock);
+ lun->flags &= ~CTL_LUN_PRIMARY_SC;
+ mtx_unlock(&lun->lun_lock);
+ ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+ ctl_isc_announce_lun(lun);
+ return (0);
+}
+
+int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
struct ctl_softc *softc;
@@ -4495,10 +4617,25 @@ void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+ union ctl_ha_msg msg;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
mtx_unlock(&lun->lun_lock);
+ if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ /* Send msg to other side. */
+ bzero(&msg.ua, sizeof(msg.ua));
+ msg.hdr.msg_type = CTL_MSG_UA;
+ msg.hdr.nexus.initid = -1;
+ msg.hdr.nexus.targ_port = -1;
+ msg.hdr.nexus.targ_lun = lun->lun;
+ msg.hdr.nexus.targ_mapped_lun = lun->lun;
+ msg.ua.ua_all = 1;
+ msg.ua.ua_set = 1;
+ msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
+ M_WAITOK);
+ }
}
/*
@@ -4531,6 +4668,8 @@ ctl_config_move_done(union ctl_io *io)
io->io_hdr.port_status);
}
+ if (ctl_debug & CTL_DEBUG_CDB_DATA)
+ ctl_data_print(io);
if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
(io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
@@ -4557,8 +4696,6 @@ ctl_config_move_done(union ctl_io *io)
*
* - Call some other function once the data is in?
*/
- if (ctl_debug & CTL_DEBUG_CDB_DATA)
- ctl_data_print(io);
/*
* XXX KDM call ctl_scsiio() again for now, and check flag
@@ -4681,7 +4818,7 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
@@ -4771,7 +4908,7 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_reserve\n"));
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
@@ -4887,7 +5024,7 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
&& ((cdb->how & SSS_START)==0)) {
uint32_t residx;
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
if (ctl_get_prkey(lun, residx) == 0
|| (lun->pr_res_idx!=residx && lun->res_type < 4)) {
@@ -6853,7 +6990,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
struct scsi_maintenance_in *cdb;
int retval;
- int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os;
+ int alloc_len, ext, total_len = 0, g, pc, pg, gs, os;
int num_target_port_groups, num_target_ports;
struct ctl_lun *lun;
struct ctl_softc *softc;
@@ -6909,8 +7046,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
total_len = sizeof(struct scsi_target_group_data);
total_len += sizeof(struct scsi_target_port_group_descriptor) *
num_target_port_groups +
- sizeof(struct scsi_target_port_descriptor) *
- num_target_ports * num_target_port_groups;
+ sizeof(struct scsi_target_port_descriptor) * num_target_ports;
alloc_len = scsi_4btoul(cdb->length);
@@ -6945,35 +7081,36 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
}
mtx_lock(&softc->ctl_lock);
- pg = softc->port_offset / CTL_MAX_PORTS;
- if (softc->flags & CTL_FLAG_ACTIVE_SHELF) {
- if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) {
- gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_STANDBY;
- } else if (lun->flags & CTL_LUN_PRIMARY_SC) {
- gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- } else {
- gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- }
- } else {
+ pg = softc->port_min / softc->port_cnt;
+ if (softc->ha_link == CTL_HA_LINK_OFFLINE)
+ gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
+ else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
+ gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
+ else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
+ else
+ gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ if (lun->flags & CTL_LUN_PRIMARY_SC) {
+ os = gs;
+ gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+ } else
os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- }
for (g = 0; g < num_target_port_groups; g++) {
tpg_desc->pref_state = (g == pg) ? gs : os;
- tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP;
+ tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
+ TPG_U_SUP | TPG_T_SUP;
scsi_ulto2b(g + 1, tpg_desc->target_port_group);
tpg_desc->status = TPG_IMPLICIT;
pc = 0;
STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port < g * softc->port_cnt ||
+ port->targ_port >= (g + 1) * softc->port_cnt)
+ continue;
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
- p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
- scsi_ulto2b(p, tpg_desc->descriptors[pc].
+ scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
pc++;
}
@@ -7342,7 +7479,7 @@ retry:
scsi_ulto4b(sizeof(struct scsi_per_res_key) *
lun->pr_key_count, res_keys->header.length);
- for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
if ((key = ctl_get_prkey(lun, i)) == 0)
continue;
@@ -7471,7 +7608,7 @@ retry:
scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
res_desc = &res_status->desc[0];
- for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if ((key = ctl_get_prkey(lun, i)) == 0)
continue;
@@ -7485,8 +7622,7 @@ retry:
scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
res_desc->rel_trgt_port_id);
len = 0;
- port = softc->ctl_ports[
- ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)];
+ port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
if (port != NULL)
len = ctl_create_iid(port,
i % CTL_MAX_INIT_PER_PORT,
@@ -7516,15 +7652,6 @@ retry:
return (CTL_RETVAL_COMPLETE);
}
-static void
-ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua)
-{
- int off = lun->ctl_softc->persis_offset;
-
- if (residx >= off && residx < off + CTL_MAX_INITIATORS)
- ctl_est_ua(lun, residx - off, ua);
-}
-
/*
* Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
* it should return.
@@ -7536,10 +7663,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
struct scsi_per_res_out_parms* param)
{
union ctl_ha_msg persis_io;
- int retval, i;
- int isc_retval;
-
- retval = 0;
+ int i;
mtx_lock(&lun->lun_lock);
if (sa_res_key == 0) {
@@ -7574,18 +7698,20 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* Unregister everybody else and build UA for
* them
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for(i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->pr_key_count = 1;
lun->res_type = type;
if (lun->res_type != SPR_TYPE_WR_EX_AR
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
@@ -7596,13 +7722,8 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/* not all registrants */
mtx_unlock(&lun->lun_lock);
@@ -7643,14 +7764,14 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
return (1);
}
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (ctl_get_prkey(lun, i) != sa_res_key)
continue;
found = 1;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
if (!found) {
mtx_unlock(&lun->lun_lock);
@@ -7659,6 +7780,9 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7668,12 +7792,8 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/* Reserved but not all registrants */
/* sa_res_key is res holder */
@@ -7718,18 +7838,18 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* except don't unregister the res holder.
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for(i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
if (sa_res_key == ctl_get_prkey(lun, i)) {
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
} else if (type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type ==SPR_TYPE_EX_AC_RO)){
- ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
+ ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
lun->res_type = type;
@@ -7738,6 +7858,8 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
lun->pr_res_idx = residx;
else
lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7747,13 +7869,8 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/*
* sa_res_key is not the res holder just
@@ -7761,14 +7878,14 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
*/
int found=0;
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (sa_res_key != ctl_get_prkey(lun, i))
continue;
found = 1;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
if (!found) {
@@ -7778,6 +7895,9 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
ctl_done((union ctl_io *)ctsio);
return (1);
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
@@ -7786,20 +7906,11 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
}
-
- lun->PRGeneration++;
- mtx_unlock(&lun->lun_lock);
-
- return (retval);
+ return (0);
}
static void
@@ -7818,13 +7929,13 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
* Unregister everybody else and build UA for
* them
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for(i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == msg->pr.pr_info.residx ||
ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->pr_key_count = 1;
@@ -7833,17 +7944,17 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = msg->pr.pr_info.residx;
} else {
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (sa_res_key == ctl_get_prkey(lun, i))
continue;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
}
} else {
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == msg->pr.pr_info.residx ||
ctl_get_prkey(lun, i) == 0)
continue;
@@ -7851,11 +7962,11 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
if (sa_res_key == ctl_get_prkey(lun, i)) {
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
} else if (msg->pr.pr_info.res_type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type == SPR_TYPE_EX_AC_RO)) {
- ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
+ ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
lun->res_type = msg->pr.pr_info.res_type;
@@ -7874,7 +7985,6 @@ int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
int retval;
- int isc_retval;
u_int32_t param_len;
struct scsi_per_res_out *cdb;
struct ctl_lun *lun;
@@ -7944,7 +8054,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
res_key = scsi_8btou64(param->res_key.key);
sa_res_key = scsi_8btou64(param->serv_act_res_key);
@@ -8059,9 +8169,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* RELEASED
*/
- for (i = 0; i < CTL_MAX_INITIATORS;i++){
- if (ctl_get_prkey(lun, i +
- softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++){
+ if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i,
CTL_UA_RES_RELEASE);
@@ -8075,16 +8184,15 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
}
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
persis_io.pr.pr_info.residx = residx;
- if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0 )) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else /* sa_res_key != 0 */ {
/*
@@ -8095,6 +8203,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
if (ctl_get_prkey(lun, residx) == 0)
lun->pr_key_count++;
ctl_set_prkey(lun, residx, sa_res_key);
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -8103,15 +8213,9 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
- lun->PRGeneration++;
- mtx_unlock(&lun->lun_lock);
break;
}
@@ -8158,12 +8262,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
persis_io.pr.pr_info.action = CTL_PR_RESERVE;
persis_io.pr.pr_info.residx = lun->pr_res_idx;
persis_io.pr.pr_info.res_type = type;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
break;
@@ -8207,24 +8307,20 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
*/
if (type != SPR_TYPE_EX_AC
&& type != SPR_TYPE_WR_EX) {
- for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- if (i == residx ||
- ctl_get_prkey(lun,
- i + softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++) {
+ if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
mtx_unlock(&lun->lun_lock);
+
/* Send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_RELEASE;
- if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
- sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
break;
case SPRO_CLEAR:
@@ -8237,21 +8333,19 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
ctl_clr_prkey(lun, residx);
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
if (ctl_get_prkey(lun, i) != 0) {
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->PRGeneration++;
mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_CLEAR;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
- sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
break;
case SPRO_PREEMPT:
@@ -8326,9 +8420,8 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
* RELEASED
*/
- for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- if (ctl_get_prkey(lun, i +
- softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++) {
+ if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
@@ -8359,8 +8452,8 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
*/
if (lun->res_type != SPR_TYPE_EX_AC
&& lun->res_type != SPR_TYPE_WR_EX) {
- for (i = 0; i < CTL_MAX_INITIATORS; i++)
- if (ctl_get_prkey(lun, i + softc->persis_offset) != 0)
+ for (i = softc->init_min; i < softc->init_max; i++)
+ if (ctl_get_prkey(lun, i) != 0)
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
@@ -8378,11 +8471,11 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
lun->pr_key_count = 0;
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i=0; i < CTL_MAX_INITIATORS; i++) {
if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->PRGeneration++;
break;
@@ -9154,14 +9247,6 @@ ctl_tur(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
-#ifdef notyet
-static int
-ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
-{
-
-}
-#endif
-
/*
* SCSI VPD page 0x00, the Supported VPD Pages page.
*/
@@ -9437,7 +9522,7 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
softc = control_softc;
- port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+ port = ctl_io_port(&ctsio->io_hdr);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data_len = sizeof(struct scsi_vpd_device_id) +
@@ -9447,9 +9532,9 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
sizeof(struct scsi_vpd_id_trgt_port_grp_id);
if (lun && lun->lun_devid)
data_len += lun->lun_devid->len;
- if (port->port_devid)
+ if (port && port->port_devid)
data_len += port->port_devid->len;
- if (port->target_devid)
+ if (port && port->target_devid)
data_len += port->target_devid->len;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
@@ -9481,9 +9566,9 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
devid_ptr->page_code = SVPD_DEVICE_ID;
scsi_ulto2b(data_len - 4, devid_ptr->length);
- if (port->port_type == CTL_PORT_FC)
+ if (port && port->port_type == CTL_PORT_FC)
proto = SCSI_PROTO_FC << 4;
- else if (port->port_type == CTL_PORT_ISCSI)
+ else if (port && port->port_type == CTL_PORT_ISCSI)
proto = SCSI_PROTO_ISCSI << 4;
else
proto = SCSI_PROTO_SPI << 4;
@@ -9502,7 +9587,7 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
/*
* This is for the WWPN which is a port association.
*/
- if (port->port_devid) {
+ if (port && port->port_devid) {
memcpy(desc, port->port_devid->data, port->port_devid->len);
desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
port->port_devid->len);
@@ -9526,7 +9611,7 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
SVPD_ID_TYPE_TPORTGRP;
desc->length = 4;
- scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1,
&desc->identifier[2]);
desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
sizeof(struct scsi_vpd_id_trgt_port_grp_id));
@@ -9534,7 +9619,7 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
/*
* This is for the Target identifier
*/
- if (port->target_devid) {
+ if (port && port->target_devid) {
memcpy(desc, port->target_devid->data, port->target_devid->len);
}
@@ -9554,15 +9639,10 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
struct scsi_vpd_port_designation_cont *pdc;
struct ctl_lun *lun;
struct ctl_port *port;
- int data_len, num_target_ports, iid_len, id_len, g, pg, p;
- int num_target_port_groups;
+ int data_len, num_target_ports, iid_len, id_len;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- if (softc->is_single)
- num_target_port_groups = 1;
- else
- num_target_port_groups = NUM_TARGET_PORT_GROUPS;
num_target_ports = 0;
iid_len = 0;
id_len = 0;
@@ -9581,7 +9661,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
}
mtx_unlock(&softc->ctl_lock);
- data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
+ data_len = sizeof(struct scsi_vpd_scsi_ports) +
num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
@@ -9618,35 +9698,31 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
pd = &sp->design[0];
mtx_lock(&softc->ctl_lock);
- pg = softc->port_offset / CTL_MAX_PORTS;
- for (g = 0; g < num_target_port_groups; g++) {
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
- continue;
- if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
- continue;
- p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
- scsi_ulto2b(p, pd->relative_port_id);
- if (port->init_devid && g == pg) {
- iid_len = port->init_devid->len;
- memcpy(pd->initiator_transportid,
- port->init_devid->data, port->init_devid->len);
- } else
- iid_len = 0;
- scsi_ulto2b(iid_len, pd->initiator_transportid_length);
- pdc = (struct scsi_vpd_port_designation_cont *)
- (&pd->initiator_transportid[iid_len]);
- if (port->port_devid && g == pg) {
- id_len = port->port_devid->len;
- memcpy(pdc->target_port_descriptors,
- port->port_devid->data, port->port_devid->len);
- } else
- id_len = 0;
- scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
- pd = (struct scsi_vpd_port_designation *)
- ((uint8_t *)pdc->target_port_descriptors + id_len);
- }
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ continue;
+ scsi_ulto2b(port->targ_port, pd->relative_port_id);
+ if (port->init_devid) {
+ iid_len = port->init_devid->len;
+ memcpy(pd->initiator_transportid,
+ port->init_devid->data, port->init_devid->len);
+ } else
+ iid_len = 0;
+ scsi_ulto2b(iid_len, pd->initiator_transportid_length);
+ pdc = (struct scsi_vpd_port_designation_cont *)
+ (&pd->initiator_transportid[iid_len]);
+ if (port->port_devid) {
+ id_len = port->port_devid->len;
+ memcpy(pdc->target_port_descriptors,
+ port->port_devid->data, port->port_devid->len);
+ } else
+ id_len = 0;
+ scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
+ pd = (struct scsi_vpd_port_designation *)
+ ((uint8_t *)pdc->target_port_descriptors + id_len);
}
mtx_unlock(&softc->ctl_lock);
@@ -9913,6 +9989,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
struct scsi_inquiry_data *inq_ptr;
struct scsi_inquiry *cdb;
struct ctl_softc *softc;
+ struct ctl_port *port;
struct ctl_lun *lun;
char *val;
uint32_t alloc_len, data_len;
@@ -9925,8 +10002,11 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* We treat the ioctl front end, and any SCSI adapters, as packetized
* SCSI front ends.
*/
- port_type = softc->ctl_ports[
- ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
+ port = ctl_io_port(&ctsio->io_hdr);
+ if (port != NULL)
+ port_type = port->port_type;
+ else
+ port_type = CTL_PORT_SCSI;
if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
port_type = CTL_PORT_SCSI;
@@ -9956,55 +10036,16 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->kern_total_len = alloc_len;
}
- /*
- * If we have a LUN configured, report it as connected. Otherwise,
- * report that it is offline or no device is supported, depending
- * on the value of inquiry_pq_no_lun.
- *
- * According to the spec (SPC-4 r34), the peripheral qualifier
- * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
- *
- * "A peripheral device having the specified peripheral device type
- * is not connected to this logical unit. However, the device
- * server is capable of supporting the specified peripheral device
- * type on this logical unit."
- *
- * According to the same spec, the peripheral qualifier
- * SID_QUAL_BAD_LU (011b) is used in this scenario:
- *
- * "The device server is not capable of supporting a peripheral
- * device on this logical unit. For this peripheral qualifier the
- * peripheral device type shall be set to 1Fh. All other peripheral
- * device type values are reserved for this peripheral qualifier."
- *
- * Given the text, it would seem that we probably want to report that
- * the LUN is offline here. There is no LUN connected, but we can
- * support a LUN at the given LUN number.
- *
- * In the real world, though, it sounds like things are a little
- * different:
- *
- * - Linux, when presented with a LUN with the offline peripheral
- * qualifier, will create an sg driver instance for it. So when
- * you attach it to CTL, you wind up with a ton of sg driver
- * instances. (One for every LUN that Linux bothered to probe.)
- * Linux does this despite the fact that it issues a REPORT LUNs
- * to LUN 0 to get the inventory of supported LUNs.
- *
- * - There is other anecdotal evidence (from Emulex folks) about
- * arrays that use the offline peripheral qualifier for LUNs that
- * are on the "passive" path in an active/passive array.
- *
- * So the solution is provide a hopefully reasonable default
- * (return bad/no LUN) and allow the user to change the behavior
- * with a tunable/sysctl variable.
- */
- if (lun != NULL)
- inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
- lun->be_lun->lun_type;
- else if (softc->inquiry_pq_no_lun == 0)
- inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
- else
+ if (lun != NULL) {
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
+ softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
+ inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ } else {
+ inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
+ lun->be_lun->lun_type;
+ }
+ } else
inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
/* RMB in byte 2 is 0 */
@@ -10594,6 +10635,7 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
static int
ctl_check_blocked(struct ctl_lun *lun)
{
+ struct ctl_softc *softc = lun->ctl_softc;
union ctl_io *cur_blocked, *next_blocked;
mtx_assert(&lun->lun_lock, MA_OWNED);
@@ -10639,7 +10681,6 @@ ctl_check_blocked(struct ctl_lun *lun)
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP: {
const struct ctl_cmd_entry *entry;
- int isc_retval;
/*
* The skip case shouldn't happen, this transaction
@@ -10655,24 +10696,21 @@ ctl_check_blocked(struct ctl_lun *lun)
blocked_links);
cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
+ if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+ (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
/*
* Need to send IO back to original side to
* run
*/
union ctl_ha_msg msg_info;
+ cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
msg_info.hdr.original_sc =
cur_blocked->io_hdr.original_sc;
msg_info.hdr.serializing_sc = cur_blocked;
msg_info.hdr.msg_type = CTL_MSG_R2R;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &msg_info, sizeof(msg_info), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Check Blocked error from "
- "ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.hdr), M_NOWAIT);
break;
}
entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
@@ -10731,14 +10769,32 @@ ctl_scsiio_lun_check(struct ctl_lun *lun,
mtx_assert(&lun->lun_lock, MA_OWNED);
/*
- * If this shelf is a secondary shelf controller, we have to reject
- * any media access commands.
+ * If this shelf is a secondary shelf controller, we may have to
+ * reject some commands disallowed by HA mode and link state.
*/
- if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
- (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
- ctl_set_lun_standby(ctsio);
- retval = 1;
- goto bailout;
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+ if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+ ctl_set_lun_unavail(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+ if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+ ctl_set_lun_transit(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+ if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
+ ctl_set_lun_standby(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+
+ /* The rest of checks are only done on executing side */
+ if (softc->ha_mode == CTL_HA_MODE_XFER)
+ goto bailout;
}
if (entry->pattern & CTL_LUN_PAT_WRITE) {
@@ -10765,7 +10821,7 @@ ctl_scsiio_lun_check(struct ctl_lun *lun,
* even on reserved LUNs, and if this initiator isn't the one who
* reserved us, reject the command with a reservation conflict.
*/
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
if ((lun->flags & CTL_LUN_RESERVED)
&& ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
if (lun->res_idx != residx) {
@@ -10795,20 +10851,15 @@ ctl_scsiio_lun_check(struct ctl_lun *lun,
retval = 1;
goto bailout;
}
-
}
if ((lun->flags & CTL_LUN_OFFLINE)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) {
ctl_set_lun_not_ready(ctsio);
retval = 1;
goto bailout;
}
- /*
- * If the LUN is stopped, see if this particular command is allowed
- * for a stopped lun. Otherwise, reject it with 0x04,0x02.
- */
if ((lun->flags & CTL_LUN_STOPPED)
&& ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
/* "Logical unit not ready, initializing cmd. required" */
@@ -10827,7 +10878,6 @@ ctl_scsiio_lun_check(struct ctl_lun *lun,
bailout:
return (retval);
-
}
static void
@@ -10837,248 +10887,69 @@ ctl_failover_io(union ctl_io *io, int have_lock)
ctl_done(io);
}
-#ifdef notyet
static void
-ctl_failover(void)
+ctl_failover_lun(struct ctl_lun *lun)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc;
- union ctl_io *next_io, *pending_io;
- union ctl_io *io;
- int lun_idx;
-
- softc = control_softc;
-
- mtx_lock(&softc->ctl_lock);
- /*
- * Remove any cmds from the other SC from the rtr queue. These
- * will obviously only be for LUNs for which we're the primary.
- * We can't send status or get/send data for these commands.
- * Since they haven't been executed yet, we can just remove them.
- * We'll either abort them or delete them below, depending on
- * which HA mode we're in.
- */
-#ifdef notyet
- mtx_lock(&softc->queue_lock);
- for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
- io != NULL; io = next_io) {
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
- STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
- ctl_io_hdr, links);
- }
- mtx_unlock(&softc->queue_lock);
-#endif
-
- for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) {
- lun = softc->ctl_luns[lun_idx];
- if (lun==NULL)
- continue;
-
- /*
- * Processor LUNs are primary on both sides.
- * XXX will this always be true?
- */
- if (lun->be_lun->lun_type == T_PROCESSOR)
- continue;
-
- if ((lun->flags & CTL_LUN_PRIMARY_SC)
- && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
- printf("FAILOVER: primary lun %d\n", lun_idx);
- /*
- * Remove all commands from the other SC. First from the
- * blocked queue then from the ooa queue. Once we have
- * removed them. Call ctl_check_blocked to see if there
- * is anything that can run.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(
- &lun->blocked_queue); io != NULL; io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, blocked_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
- TAILQ_REMOVE(&lun->blocked_queue,
- &io->io_hdr,blocked_links);
- io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- TAILQ_REMOVE(&lun->ooa_queue,
- &io->io_hdr, ooa_links);
-
- ctl_free_io(io);
- }
- }
-
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
- io != NULL; io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, ooa_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
-
- TAILQ_REMOVE(&lun->ooa_queue,
- &io->io_hdr,
- ooa_links);
-
- ctl_free_io(io);
+ struct ctl_softc *softc = lun->ctl_softc;
+ struct ctl_io_hdr *io, *next_io;
+
+ CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun));
+ if (softc->ha_mode == CTL_HA_MODE_XFER) {
+ TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ if (io->flags & CTL_FLAG_IO_ACTIVE) {
+ io->flags |= CTL_FLAG_ABORT;
+ } else { /* This can be only due to DATAMOVE */
+ io->msg_type = CTL_MSG_DATAMOVE_DONE;
+ io->flags |= CTL_FLAG_IO_ACTIVE;
+ io->port_status = 31340;
+ ctl_enqueue_isc((union ctl_io *)io);
}
}
- ctl_check_blocked(lun);
- } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
- && (softc->ha_mode == CTL_HA_MODE_XFER)) {
-
- printf("FAILOVER: primary lun %d\n", lun_idx);
- /*
- * Abort all commands from the other SC. We can't
- * send status back for them now. These should get
- * cleaned up when they are completed or come out
- * for a datamove operation.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
- io != NULL; io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, ooa_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
- io->io_hdr.flags |= CTL_FLAG_ABORT;
- }
- } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
- && (softc->ha_mode == CTL_HA_MODE_XFER)) {
-
- printf("FAILOVER: secondary lun %d\n", lun_idx);
-
- lun->flags |= CTL_LUN_PRIMARY_SC;
-
- /*
- * We send all I/O that was sent to this controller
- * and redirected to the other side back with
- * busy status, and have the initiator retry it.
- * Figuring out how much data has been transferred,
- * etc. and picking up where we left off would be
- * very tricky.
- *
- * XXX KDM need to remove I/O from the blocked
- * queue as well!
- */
- for (pending_io = (union ctl_io *)TAILQ_FIRST(
- &lun->ooa_queue); pending_io != NULL;
- pending_io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
-
- pending_io->io_hdr.flags &=
- ~CTL_FLAG_SENT_2OTHER_SC;
-
- if (pending_io->io_hdr.flags &
- CTL_FLAG_IO_ACTIVE) {
- pending_io->io_hdr.flags |=
- CTL_FLAG_FAILOVER;
+ /* We are slave */
+ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+ io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if (io->flags & CTL_FLAG_IO_ACTIVE) {
+ io->flags |= CTL_FLAG_FAILOVER;
} else {
- ctl_set_busy(&pending_io->scsiio);
- ctl_done(pending_io);
+ ctl_set_busy(&((union ctl_io *)io)->
+ scsiio);
+ ctl_done((union ctl_io *)io);
}
}
-
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
- && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
- printf("FAILOVER: secondary lun %d\n", lun_idx);
- /*
- * if the first io on the OOA is not on the RtR queue
- * add it.
- */
- lun->flags |= CTL_LUN_PRIMARY_SC;
-
- pending_io = (union ctl_io *)TAILQ_FIRST(
- &lun->ooa_queue);
- if (pending_io==NULL) {
- printf("Nothing on OOA queue\n");
- continue;
- }
-
- pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
- if ((pending_io->io_hdr.flags &
- CTL_FLAG_IS_WAS_ON_RTR) == 0) {
- pending_io->io_hdr.flags |=
- CTL_FLAG_IS_WAS_ON_RTR;
- ctl_enqueue_rtr(pending_io);
+ }
+ } else { /* SERIALIZE modes */
+ TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
+ next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ TAILQ_REMOVE(&lun->blocked_queue, io,
+ blocked_links);
+ io->flags &= ~CTL_FLAG_BLOCKED;
+ TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+ ctl_free_io((union ctl_io *)io);
}
-#if 0
- else
- {
- printf("Tag 0x%04x is running\n",
- pending_io->scsiio.tag_num);
+ }
+ TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+ ctl_free_io((union ctl_io *)io);
}
-#endif
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
- for (pending_io=next_io; pending_io != NULL;
- pending_io = next_io) {
- pending_io->io_hdr.flags &=
- ~CTL_FLAG_SENT_2OTHER_SC;
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
- if (pending_io->io_hdr.flags &
- CTL_FLAG_IS_WAS_ON_RTR) {
-#if 0
- printf("Tag 0x%04x is running\n",
- pending_io->scsiio.tag_num);
-#endif
- continue;
- }
-
- switch (ctl_check_ooa(lun, pending_io,
- (union ctl_io *)TAILQ_PREV(
- &pending_io->io_hdr, ctl_ooaq,
- ooa_links))) {
-
- case CTL_ACTION_BLOCK:
- TAILQ_INSERT_TAIL(&lun->blocked_queue,
- &pending_io->io_hdr,
- blocked_links);
- pending_io->io_hdr.flags |=
- CTL_FLAG_BLOCKED;
- break;
- case CTL_ACTION_PASS:
- case CTL_ACTION_SKIP:
- pending_io->io_hdr.flags |=
- CTL_FLAG_IS_WAS_ON_RTR;
- ctl_enqueue_rtr(pending_io);
- break;
- case CTL_ACTION_OVERLAP:
- ctl_set_overlapped_cmd(
- (struct ctl_scsiio *)pending_io);
- ctl_done(pending_io);
- break;
- case CTL_ACTION_OVERLAP_TAG:
- ctl_set_overlapped_tag(
- (struct ctl_scsiio *)pending_io,
- pending_io->scsiio.tag_num & 0xff);
- ctl_done(pending_io);
- break;
- case CTL_ACTION_ERROR:
- default:
- ctl_set_internal_failure(
- (struct ctl_scsiio *)pending_io,
- 0, // sks_valid
- 0); //retry count
- ctl_done(pending_io);
- break;
+ /* We are slave */
+ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+ io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
+ ctl_set_busy(&((union ctl_io *)io)->
+ scsiio);
+ ctl_done((union ctl_io *)io);
}
}
-
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- } else {
- panic("Unhandled HA mode failover, LUN flags = %#x, "
- "ha_mode = #%x", lun->flags, softc->ha_mode);
}
+ ctl_check_blocked(lun);
}
- ctl_pause_rtr = 0;
- mtx_unlock(&softc->ctl_lock);
}
-#endif
static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
@@ -11110,9 +10981,6 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
lun->be_lun;
- if (lun->be_lun->lun_type == T_PROCESSOR) {
- ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
- }
/*
* Every I/O goes into the OOA queue for a
@@ -11247,45 +11115,31 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
* find it easily. Something similar will need be done on the other
* side so when we are done we can find the copy.
*/
- if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) {
union ctl_ha_msg msg_info;
int isc_retval;
ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ mtx_unlock(&lun->lun_lock);
msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
msg_info.hdr.original_sc = (union ctl_io *)ctsio;
-#if 0
- printf("1. ctsio %p\n", ctsio);
-#endif
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.nexus = ctsio->io_hdr.nexus;
msg_info.scsi.tag_num = ctsio->tag_num;
msg_info.scsi.tag_type = ctsio->tag_type;
+ msg_info.scsi.cdb_len = ctsio->cdb_len;
memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
- ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
- isc_retval);
- printf("CTL:opcode is %x\n", ctsio->cdb[0]);
- } else {
-#if 0
- printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
-#endif
+ if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
+ M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
+ ctl_set_busy(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
}
-
- /*
- * XXX KDM this I/O is off the incoming queue, but hasn't
- * been inserted on any other queue. We may need to come
- * up with a holding queue while we wait for serialization
- * so that we have an idea of what we're waiting for from
- * the other side.
- */
- mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -11455,7 +11309,6 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
msg_info.hdr.nexus = io->io_hdr.nexus;
if (ua_type==CTL_UA_TARG_RESET)
msg_info.task.task_action = CTL_TASK_TARGET_RESET;
@@ -11464,9 +11317,8 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0)) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_WAITOK);
}
retval = 0;
@@ -11584,8 +11436,8 @@ ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0);
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_NOWAIT);
}
}
}
@@ -11629,10 +11481,9 @@ ctl_i_t_nexus_reset(union ctl_io *io)
{
struct ctl_softc *softc = control_softc;
struct ctl_lun *lun;
- uint32_t initidx, residx;
+ uint32_t initidx;
initidx = ctl_get_initindex(&io->io_hdr.nexus);
- residx = ctl_get_resindex(&io->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
mtx_lock(&lun->lun_lock);
@@ -11642,7 +11493,7 @@ ctl_i_t_nexus_reset(union ctl_io *io)
#ifdef CTL_WITH_CA
ctl_clear_mask(lun->have_ca, initidx);
#endif
- if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
+ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
lun->flags &= ~CTL_LUN_RESERVED;
ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
mtx_unlock(&lun->lun_lock);
@@ -11747,7 +11598,6 @@ ctl_abort_task(union ctl_io *io)
!(lun->flags & CTL_LUN_PRIMARY_SC)) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_ABORT_TASK;
msg_info.task.tag_num = io->taskio.tag_num;
@@ -11758,10 +11608,8 @@ ctl_abort_task(union ctl_io *io)
#if 0
printf("Sent Abort to other side\n");
#endif
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0) !=
- CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_NOWAIT);
}
#if 0
printf("ctl_abort_task: found I/O to abort\n");
@@ -11855,30 +11703,20 @@ ctl_run_task(union ctl_io *io)
retval = 1;
break;
}
+ retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET);
+ mtx_unlock(&softc->ctl_lock);
- if (!(io->io_hdr.flags &
- CTL_FLAG_FROM_OTHER_SC)) {
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |=
- CTL_FLAG_SENT_2OTHER_SC;
- msg_info.hdr.msg_type =
- CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
- msg_info.task.task_action =
- CTL_TASK_LUN_RESET;
+ msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS !=
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info,
- sizeof(msg_info), 0)) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_WAITOK);
}
-
- retval = ctl_lun_reset(lun, io,
- CTL_UA_LUN_RESET);
- mtx_unlock(&softc->ctl_lock);
break;
}
case CTL_TASK_TARGET_RESET:
@@ -11976,6 +11814,12 @@ ctl_handle_isc(union ctl_io *io)
free_io = 0;
io->scsiio.be_move_done(io);
break;
+ case CTL_MSG_FAILOVER:
+ mtx_lock(&lun->lun_lock);
+ ctl_failover_lun(lun);
+ mtx_unlock(&lun->lun_lock);
+ free_io = 1;
+ break;
default:
free_io = 1;
printf("%s: Invalid message type %d\n",
@@ -12177,10 +12021,6 @@ ctl_datamove(union ctl_io *io)
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
- struct ctl_lun *lun;
-
- lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
} else {
struct ctl_lun *lun;
@@ -12257,21 +12097,7 @@ ctl_datamove(union ctl_io *io)
*/
if (io->scsiio.kern_sg_entries == 0) {
msg.dt.kern_sg_entries = 1;
- /*
- * If this is in cached memory, flush the cache
- * before we send the DMA request to the other
- * controller. We want to do this in either the
- * read or the write case. The read case is
- * straightforward. In the write case, we want to
- * make sure nothing is in the local cache that
- * could overwrite the DMAed data.
- */
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
-
+#if 0
/*
* Convert to a physical address if this is a
* virtual address.
@@ -12283,25 +12109,20 @@ ctl_datamove(union ctl_io *io)
/*
* XXX KDM use busdma here!
*/
-#if 0
msg.dt.sg_list[0].addr = (void *)
vtophys(io->scsiio.kern_data_ptr);
-#endif
}
+#else
+ KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
+#endif
msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
do_sg_copy = 0;
} else {
- struct ctl_sg_entry *sgl;
-
- do_sg_copy = 1;
msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
- sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
+ do_sg_copy = 1;
}
msg.dt.kern_data_len = io->scsiio.kern_data_len;
@@ -12340,25 +12161,24 @@ ctl_datamove(union ctl_io *io)
for (i = sg_entries_sent, j = 0;
i < msg.dt.cur_sg_entries; i++, j++) {
- if ((io->io_hdr.flags &
- CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync()
- */
- }
+#if 0
if ((io->io_hdr.flags &
CTL_FLAG_BUS_ADDR) == 0) {
/*
* XXX KDM use busdma.
*/
-#if 0
msg.dt.sg_list[j].addr =(void *)
vtophys(sgl[i].addr);
-#endif
} else {
msg.dt.sg_list[j].addr =
sgl[i].addr;
}
+#else
+ KASSERT((io->io_hdr.flags &
+ CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ msg.dt.sg_list[j].addr = sgl[i].addr;
+#endif
msg.dt.sg_list[j].len = sgl[i].len;
}
}
@@ -12369,30 +12189,25 @@ ctl_datamove(union ctl_io *io)
else
msg.dt.sg_last = 0;
- /*
- * XXX KDM drop and reacquire the lock here?
- */
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
- sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
- /*
- * XXX do something here.
- */
+ sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
+ sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries,
+ M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
+ io->io_hdr.port_status = 31341;
+ io->scsiio.be_move_done(io);
+ return;
}
msg.dt.sent_sg_entries = sg_entries_sent;
}
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
- if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
- ctl_failover_io(io, /*have_lock*/ 0);
-
} else {
/*
* Lookup the fe_datamove() function for this particular
* front end.
*/
- fe_datamove =
- control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
}
@@ -12402,7 +12217,6 @@ static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
union ctl_ha_msg msg;
- int isc_status;
memset(&msg, 0, sizeof(msg));
@@ -12415,7 +12229,7 @@ ctl_send_datamove_done(union ctl_io *io, int have_lock)
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.scsi_status = io->scsiio.scsi_status;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->scsiio.sense_len);
msg.scsi.sense_len = io->scsiio.sense_len;
msg.scsi.sense_residual = io->scsiio.sense_residual;
msg.scsi.fetd_status = io->io_hdr.port_status;
@@ -12427,11 +12241,9 @@ ctl_send_datamove_done(union ctl_io *io, int have_lock)
return;
}
- isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
- if (isc_status > CTL_HA_STATUS_SUCCESS) {
- /* XXX do something if this fails */
- }
-
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+ msg.scsi.sense_len, M_WAITOK);
}
/*
@@ -12442,6 +12254,7 @@ static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
union ctl_io *io;
+ int i;
io = rq->context;
@@ -12455,14 +12268,12 @@ ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
ctl_dt_req_free(rq);
- /*
- * In this case, we had to malloc the memory locally. Free it.
- */
- if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
- int i;
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- free(io->io_hdr.local_sglist[i].addr, M_CTL);
- }
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
+
/*
* The data is in local and remote memory, so now we need to send
* status (good or back) back to the other side.
@@ -12512,7 +12323,7 @@ ctl_datamove_remote_write(union ctl_io *io)
*/
io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
- fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
@@ -12528,15 +12339,13 @@ ctl_datamove_remote_dm_read_cb(union ctl_io *io)
char path_str[64];
struct sbuf sb;
#endif
+ int i;
- /*
- * In this case, we had to malloc the memory locally. Free it.
- */
- if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
- int i;
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- free(io->io_hdr.local_sglist[i].addr, M_CTL);
- }
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
#if 0
scsi_path_string(io, path_str, sizeof(path_str));
@@ -12573,7 +12382,7 @@ ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
io = rq->context;
if (rq->ret != CTL_HA_STATUS_SUCCESS) {
- printf("%s: ISC DMA read failed with error %d", __func__,
+ printf("%s: ISC DMA read failed with error %d\n", __func__,
rq->ret);
ctl_set_internal_failure(&io->scsiio,
/*sks_valid*/ 1,
@@ -12593,7 +12402,7 @@ ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
/* XXX KDM add checks like the ones in ctl_datamove? */
- fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
}
@@ -12602,134 +12411,43 @@ static int
ctl_datamove_remote_sgl_setup(union ctl_io *io)
{
struct ctl_sg_entry *local_sglist, *remote_sglist;
- struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
struct ctl_softc *softc;
+ uint32_t len_to_go;
int retval;
int i;
retval = 0;
softc = control_softc;
-
local_sglist = io->io_hdr.local_sglist;
- local_dma_sglist = io->io_hdr.local_dma_sglist;
remote_sglist = io->io_hdr.remote_sglist;
- remote_dma_sglist = io->io_hdr.remote_dma_sglist;
-
- if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
- for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
- local_sglist[i].len = remote_sglist[i].len;
-
- /*
- * XXX Detect the situation where the RS-level I/O
- * redirector on the other side has already read the
- * data off of the AOR RS on this side, and
- * transferred it to remote (mirror) memory on the
- * other side. Since we already have the data in
- * memory here, we just need to use it.
- *
- * XXX KDM this can probably be removed once we
- * get the cache device code in and take the
- * current AOR implementation out.
- */
-#ifdef NEEDTOPORT
- if ((remote_sglist[i].addr >=
- (void *)vtophys(softc->mirr->addr))
- && (remote_sglist[i].addr <
- ((void *)vtophys(softc->mirr->addr) +
- CacheMirrorOffset))) {
- local_sglist[i].addr = remote_sglist[i].addr -
- CacheMirrorOffset;
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
- CTL_FLAG_DATA_IN)
- io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
- } else {
- local_sglist[i].addr = remote_sglist[i].addr +
- CacheMirrorOffset;
- }
-#endif
-#if 0
- printf("%s: local %p, remote %p, len %d\n",
- __func__, local_sglist[i].addr,
- remote_sglist[i].addr, local_sglist[i].len);
-#endif
- }
- } else {
- uint32_t len_to_go;
-
- /*
- * In this case, we don't have automatically allocated
- * memory for this I/O on this controller. This typically
- * happens with internal CTL I/O -- e.g. inquiry, mode
- * sense, etc. Anything coming from RAIDCore will have
- * a mirror area available.
- */
- len_to_go = io->scsiio.kern_data_len;
-
- /*
- * Clear the no datasync flag, we have to use malloced
- * buffers.
- */
- io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
+ len_to_go = io->scsiio.kern_data_len;
- /*
- * The difficult thing here is that the size of the various
- * S/G segments may be different than the size from the
- * remote controller. That'll make it harder when DMAing
- * the data back to the other side.
- */
- for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
- sizeof(io->io_hdr.remote_sglist[0])) &&
- (len_to_go > 0); i++) {
- local_sglist[i].len = MIN(len_to_go, 131072);
- CTL_SIZE_8B(local_dma_sglist[i].len,
- local_sglist[i].len);
- local_sglist[i].addr =
- malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);
-
- local_dma_sglist[i].addr = local_sglist[i].addr;
-
- if (local_sglist[i].addr == NULL) {
- int j;
-
- printf("malloc failed for %zd bytes!",
- local_dma_sglist[i].len);
- for (j = 0; j < i; j++) {
- free(local_sglist[j].addr, M_CTL);
- }
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/ 4857);
- retval = 1;
- goto bailout_error;
-
- }
- /* XXX KDM do we need a sync here? */
+ /*
+ * The difficult thing here is that the size of the various
+ * S/G segments may be different than the size from the
+ * remote controller. That'll make it harder when DMAing
+ * the data back to the other side.
+ */
+ for (i = 0; len_to_go > 0; i++) {
+ local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
+ local_sglist[i].addr =
+ malloc(local_sglist[i].len, M_CTL, M_WAITOK);
- len_to_go -= local_sglist[i].len;
- }
- /*
- * Reset the number of S/G entries accordingly. The
- * original number of S/G entries is available in
- * rem_sg_entries.
- */
- io->scsiio.kern_sg_entries = i;
+ len_to_go -= local_sglist[i].len;
+ }
+ /*
+ * Reset the number of S/G entries accordingly. The original
+ * number of S/G entries is available in rem_sg_entries.
+ */
+ io->scsiio.kern_sg_entries = i;
#if 0
- printf("%s: kern_sg_entries = %d\n", __func__,
- io->scsiio.kern_sg_entries);
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
- local_sglist[i].addr, local_sglist[i].len,
- local_dma_sglist[i].len);
+ printf("%s: kern_sg_entries = %d\n", __func__,
+ io->scsiio.kern_sg_entries);
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ printf("%s: sg[%d] = %p, %d\n", __func__, i,
+ local_sglist[i].addr, local_sglist[i].len);
#endif
- }
-
-
- return (retval);
-
-bailout_error:
-
- ctl_send_datamove_done(io, /*have_lock*/ 0);
return (retval);
}
@@ -12740,12 +12458,8 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
{
struct ctl_ha_dt_req *rq;
struct ctl_sg_entry *remote_sglist, *local_sglist;
- struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
uint32_t local_used, remote_used, total_used;
- int retval;
- int i, j;
-
- retval = 0;
+ int i, j, isc_ret;
rq = ctl_dt_req_alloc();
@@ -12771,26 +12485,15 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
ctl_send_datamove_done(io, /*have_lock*/ 0);
- retval = 1;
-
- goto bailout;
+ return (1);
}
local_sglist = io->io_hdr.local_sglist;
- local_dma_sglist = io->io_hdr.local_dma_sglist;
remote_sglist = io->io_hdr.remote_sglist;
- remote_dma_sglist = io->io_hdr.remote_dma_sglist;
local_used = 0;
remote_used = 0;
total_used = 0;
- if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
- rq->ret = CTL_HA_STATUS_SUCCESS;
- rq->context = io;
- callback(rq);
- goto bailout;
- }
-
/*
* Pull/push the data over the wire from/to the other controller.
* This takes into account the possibility that the local and
@@ -12801,12 +12504,11 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
* both the local and remote sglists is identical. Otherwise, we've
* essentially got a coding error of some sort.
*/
+ isc_ret = CTL_HA_STATUS_SUCCESS;
for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
- int isc_ret;
- uint32_t cur_len, dma_length;
+ uint32_t cur_len;
uint8_t *tmp_ptr;
- rq->id = CTL_HA_DATA_CTL;
rq->command = command;
rq->context = io;
@@ -12818,52 +12520,23 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
*/
cur_len = MIN(local_sglist[i].len - local_used,
remote_sglist[j].len - remote_used);
-
- /*
- * In this case, we have a size issue and need to decrease
- * the size, except in the case where we actually have less
- * than 8 bytes left. In that case, we need to increase
- * the DMA length to get the last bit.
- */
- if ((cur_len & 0x7) != 0) {
- if (cur_len > 0x7) {
- cur_len = cur_len - (cur_len & 0x7);
- dma_length = cur_len;
- } else {
- CTL_SIZE_8B(dma_length, cur_len);
- }
-
- } else
- dma_length = cur_len;
-
- /*
- * If we had to allocate memory for this I/O, instead of using
- * the non-cached mirror memory, we'll need to flush the cache
- * before trying to DMA to the other controller.
- *
- * We could end up doing this multiple times for the same
- * segment if we have a larger local segment than remote
- * segment. That shouldn't be an issue.
- */
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
-
- rq->size = dma_length;
+ rq->size = cur_len;
tmp_ptr = (uint8_t *)local_sglist[i].addr;
tmp_ptr += local_used;
+#if 0
/* Use physical addresses when talking to ISC hardware */
if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
/* XXX KDM use busdma */
-#if 0
rq->local = vtophys(tmp_ptr);
-#endif
} else
rq->local = tmp_ptr;
+#else
+ KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ rq->local = tmp_ptr;
+#endif
tmp_ptr = (uint8_t *)remote_sglist[j].addr;
tmp_ptr += remote_used;
@@ -12887,18 +12560,6 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
if (total_used >= io->scsiio.kern_data_len)
rq->callback = callback;
- if ((rq->size & 0x7) != 0) {
- printf("%s: warning: size %d is not on 8b boundary\n",
- __func__, rq->size);
- }
- if (((uintptr_t)rq->local & 0x7) != 0) {
- printf("%s: warning: local %p not on 8b boundary\n",
- __func__, rq->local);
- }
- if (((uintptr_t)rq->remote & 0x7) != 0) {
- printf("%s: warning: remote %p not on 8b boundary\n",
- __func__, rq->local);
- }
#if 0
printf("%s: %s: local %#x remote %#x size %d\n", __func__,
(command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
@@ -12906,21 +12567,15 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
#endif
isc_ret = ctl_dt_single(rq);
- if (isc_ret == CTL_HA_STATUS_WAIT)
- continue;
-
- if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
- rq->ret = CTL_HA_STATUS_SUCCESS;
- } else {
- rq->ret = isc_ret;
- }
+ if (isc_ret > CTL_HA_STATUS_SUCCESS)
+ break;
+ }
+ if (isc_ret != CTL_HA_STATUS_WAIT) {
+ rq->ret = isc_ret;
callback(rq);
- goto bailout;
}
-bailout:
- return (retval);
-
+ return (0);
}
static void
@@ -12939,8 +12594,7 @@ ctl_datamove_remote_read(union ctl_io *io)
retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
ctl_datamove_remote_read_cb);
- if ((retval != 0)
- && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
+ if (retval != 0) {
/*
* Make sure we free memory if there was an error.. The
* ctl_datamove_remote_xfer() function will send the
@@ -12949,6 +12603,9 @@ ctl_datamove_remote_read(union ctl_io *io)
*/
for (i = 0; i < io->scsiio.kern_sg_entries; i++)
free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
}
return;
@@ -12964,11 +12621,13 @@ ctl_datamove_remote_read(union ctl_io *io)
static void
ctl_datamove_remote(union ctl_io *io)
{
- struct ctl_softc *softc;
- softc = control_softc;
+ mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
- mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
+ if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+ ctl_failover_io(io, /*have_lock*/ 0);
+ return;
+ }
/*
* Note that we look for an aborted I/O here, but don't do some of
@@ -12986,54 +12645,14 @@ ctl_datamove_remote(union ctl_io *io)
return;
}
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
ctl_datamove_remote_write(io);
- } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
+ else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
ctl_datamove_remote_read(io);
- } else {
- union ctl_ha_msg msg;
- struct scsi_sense_data *sense;
- uint8_t sks[3];
- int retry_count;
-
- memset(&msg, 0, sizeof(msg));
-
- msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
- msg.hdr.status = CTL_SCSI_ERROR;
- msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-
- retry_count = 4243;
-
- sense = &msg.scsi.sense_data;
- sks[0] = SSD_SCS_VALID;
- sks[1] = (retry_count >> 8) & 0xff;
- sks[2] = retry_count & 0xff;
-
- /* "Internal target failure" */
- scsi_set_sense_data(sense,
- /*sense_format*/ SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
- /*asc*/ 0x44,
- /*ascq*/ 0x00,
- /*type*/ SSD_ELEM_SKS,
- /*size*/ sizeof(sks),
- /*data*/ sks,
- SSD_ELEM_NONE);
-
- io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
- if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
- ctl_failover_io(io, /*have_lock*/ 1);
- return;
- }
-
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
- CTL_HA_STATUS_SUCCESS) {
- /* XXX KDM what to do if this fails? */
- }
- return;
+ else {
+ io->io_hdr.port_status = 31339;
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
}
-
}
static int
@@ -13042,11 +12661,15 @@ ctl_process_done(union ctl_io *io)
struct ctl_lun *lun;
struct ctl_softc *softc = control_softc;
void (*fe_done)(union ctl_io *io);
- uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
+ union ctl_ha_msg msg;
+ uint32_t targ_port = io->io_hdr.nexus.targ_port;
CTL_DEBUG_PRINT(("ctl_process_done\n"));
- fe_done = softc->ctl_ports[targ_port]->fe_done;
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0)
+ fe_done = softc->ctl_ports[targ_port]->fe_done;
+ else
+ fe_done = NULL;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
@@ -13204,20 +12827,19 @@ bailout:
* Tell the FETD or the other shelf controller we're done with this
* command. Note that only SCSI commands get to this point. Task
* management commands are completed above.
- *
- * We only send status to the other controller if we're in XFER
- * mode. In SER_ONLY mode, the I/O is done on the controller that
- * received the I/O (from CTL's perspective), and so the status is
- * generated there.
- *
- * XXX KDM if we hold the lock here, we could cause a deadlock
- * if the frontend comes back in in this context to queue
- * something.
*/
+ if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+ (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
+ memset(&msg, 0, sizeof(msg));
+ msg.hdr.msg_type = CTL_MSG_FINISH_IO;
+ msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
+ msg.hdr.nexus = io->io_hdr.nexus;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
+ M_WAITOK);
+ }
if ((softc->ha_mode == CTL_HA_MODE_XFER)
&& (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
- union ctl_ha_msg msg;
-
memset(&msg, 0, sizeof(msg));
msg.hdr.msg_type = CTL_MSG_FINISH_IO;
msg.hdr.original_sc = io->io_hdr.original_sc;
@@ -13230,7 +12852,7 @@ bailout:
msg.scsi.sense_residual = io->scsiio.sense_residual;
msg.scsi.residual = io->scsiio.residual;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->scsiio.sense_len);
/*
* We copy this whether or not this is an I/O-related
* command. Otherwise, we'd have to go and check to see
@@ -13241,11 +12863,9 @@ bailout:
&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
sizeof(msg.scsi.lbalen));
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
- sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
- /* XXX do something here */
- }
-
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+ msg.scsi.sense_len, M_WAITOK);
ctl_free_io(io);
} else
fe_done(io);
@@ -13400,25 +13020,6 @@ ctl_done(union ctl_io *io)
if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
return;
- /*
- * We need to send a msg to the serializing shelf to finish the IO
- * as well. We don't send a finish message to the other shelf if
- * this is a task management command. Task management commands
- * aren't serialized in the OOA queue, but rather just executed on
- * both shelf controllers for commands that originated on that
- * controller.
- */
- if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
- && (io->io_hdr.io_type != CTL_IO_TASK)) {
- union ctl_ha_msg msg_io;
-
- msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
- msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
- sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
- }
- /* continue on to finish IO */
- }
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
struct ctl_lun *lun;
@@ -13449,24 +13050,6 @@ ctl_done(union ctl_io *io)
ctl_enqueue_done(io);
}
-int
-ctl_isc(struct ctl_scsiio *ctsio)
-{
- struct ctl_lun *lun;
- int retval;
-
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
- CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
-
- CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
-
- retval = lun->backend->data_submit((union ctl_io *)ctsio);
-
- return (retval);
-}
-
-
static void
ctl_work_thread(void *arg)
{
@@ -13516,16 +13099,14 @@ ctl_work_thread(void *arg)
ctl_scsiio_precheck(softc, &io->scsiio);
continue;
}
- if (!ctl_pause_rtr) {
- io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
- mtx_unlock(&thr->queue_lock);
- retval = ctl_scsiio(&io->scsiio);
- if (retval != CTL_RETVAL_COMPLETE)
- CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
- continue;
- }
+ io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ retval = ctl_scsiio(&io->scsiio);
+ if (retval != CTL_RETVAL_COMPLETE)
+ CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
+ continue;
}
/* Sleep until we have something to do. */
@@ -13568,8 +13149,9 @@ ctl_thresh_thread(void *arg)
struct scsi_da_rw_recovery_page *rwpage;
struct ctl_logical_block_provisioning_page *page;
const char *attr;
+ union ctl_ha_msg msg;
uint64_t thres, val;
- int i, e;
+ int i, e, set;
CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
@@ -13581,6 +13163,9 @@ ctl_thresh_thread(void *arg)
(lun->flags & CTL_LUN_OFFLINE) ||
lun->backend->lun_attr == NULL)
continue;
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ softc->ha_mode == CTL_HA_MODE_XFER)
+ continue;
rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
continue;
@@ -13625,12 +13210,32 @@ ctl_thresh_thread(void *arg)
time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
lun->lasttpt = time_uptime;
ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
- }
+ set = 1;
+ } else
+ set = 0;
} else {
lun->lasttpt = 0;
ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
+ set = -1;
}
mtx_unlock(&lun->lun_lock);
+ if (set != 0 &&
+ lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ /* Send msg to other side. */
+ bzero(&msg.ua, sizeof(msg.ua));
+ msg.hdr.msg_type = CTL_MSG_UA;
+ msg.hdr.nexus.initid = -1;
+ msg.hdr.nexus.targ_port = -1;
+ msg.hdr.nexus.targ_lun = lun->lun;
+ msg.hdr.nexus.targ_mapped_lun = lun->lun;
+ msg.ua.ua_all = 1;
+ msg.ua.ua_set = (set > 0);
+ msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
+ mtx_unlock(&softc->ctl_lock); // XXX
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.ua), M_WAITOK);
+ mtx_lock(&softc->ctl_lock);
+ }
}
mtx_unlock(&softc->ctl_lock);
pause("-", CTL_LBP_PERIOD * hz);
@@ -13679,7 +13284,6 @@ ctl_enqueue_done(union ctl_io *io)
wakeup(thr);
}
-#ifdef notyet
static void
ctl_enqueue_isc(union ctl_io *io)
{
@@ -13693,107 +13297,6 @@ ctl_enqueue_isc(union ctl_io *io)
wakeup(thr);
}
-/* Initialization and failover */
-
-void
-ctl_init_isc_msg(void)
-{
- printf("CTL: Still calling this thing\n");
-}
-
-/*
- * Init component
- * Initializes component into configuration defined by bootMode
- * (see hasc-sv.c)
- * returns hasc_Status:
- * OK
- * ERROR - fatal error
- */
-static ctl_ha_comp_status
-ctl_isc_init(struct ctl_ha_component *c)
-{
- ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
-
- c->status = ret;
- return ret;
-}
-
-/* Start component
- * Starts component in state requested. If component starts successfully,
- * it must set its own state to the requestrd state
- * When requested state is HASC_STATE_HA, the component may refine it
- * by adding _SLAVE or _MASTER flags.
- * Currently allowed state transitions are:
- * UNKNOWN->HA - initial startup
- * UNKNOWN->SINGLE - initial startup when no parter detected
- * HA->SINGLE - failover
- * returns ctl_ha_comp_status:
- * OK - component successfully started in requested state
- * FAILED - could not start the requested state, failover may
- * be possible
- * ERROR - fatal error detected, no future startup possible
- */
-static ctl_ha_comp_status
-ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
-{
- ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
-
- printf("%s: go\n", __func__);
-
- // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
- if (c->state == CTL_HA_STATE_UNKNOWN ) {
- control_softc->is_single = 0;
- if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
- != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
- ret = CTL_HA_COMP_STATUS_ERROR;
- }
- } else if (CTL_HA_STATE_IS_HA(c->state)
- && CTL_HA_STATE_IS_SINGLE(state)){
- // HA->SINGLE transition
- ctl_failover();
- control_softc->is_single = 1;
- } else {
- printf("ctl_isc_start:Invalid state transition %X->%X\n",
- c->state, state);
- ret = CTL_HA_COMP_STATUS_ERROR;
- }
- if (CTL_HA_STATE_IS_SINGLE(state))
- control_softc->is_single = 1;
-
- c->state = state;
- c->status = ret;
- return ret;
-}
-
-/*
- * Quiesce component
- * The component must clear any error conditions (set status to OK) and
- * prepare itself to another Start call
- * returns ctl_ha_comp_status:
- * OK
- * ERROR
- */
-static ctl_ha_comp_status
-ctl_isc_quiesce(struct ctl_ha_component *c)
-{
- int ret = CTL_HA_COMP_STATUS_OK;
-
- ctl_pause_rtr = 1;
- c->status = ret;
- return ret;
-}
-
-struct ctl_ha_component ctl_ha_component_ctlisc =
-{
- .name = "CTL ISC",
- .state = CTL_HA_STATE_UNKNOWN,
- .init = ctl_isc_init,
- .start = ctl_isc_start,
- .quiesce = ctl_isc_quiesce
-};
-#endif
-
/*
* vim: ts=8
*/
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index 0e0bfb6..630e3bb 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -139,24 +139,12 @@ SYSCTL_DECL(_kern_cam_ctl);
#endif
/*
- * Call these routines to enable or disable front end ports.
- */
-int ctl_port_enable(ctl_port_type port_type);
-int ctl_port_disable(ctl_port_type port_type);
-/*
- * This routine grabs a list of frontend ports.
- */
-int ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
- int *num_entries_filled, int *num_entries_dropped,
- ctl_port_type port_type, int no_virtual);
-
-/*
* Put a string into an sbuf, escaping characters that are illegal or not
* recommended in XML. Note this doesn't escape everything, just > < and &.
*/
int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size);
-int ctl_ffz(uint32_t *mask, uint32_t size);
+int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last);
int ctl_set_mask(uint32_t *mask, uint32_t bit);
int ctl_clear_mask(uint32_t *mask, uint32_t bit);
int ctl_is_set(uint32_t *mask, uint32_t bit);
@@ -165,11 +153,6 @@ int ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
int ctl_control_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
-/**
-int ctl_failover_sp_handler(struct ctl_scsiio *ctsio,
- struct ctl_page_index *page_index,
- uint8_t *page_ptr);
-**/
int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc);
@@ -189,11 +172,12 @@ void ctl_data_submit_done(union ctl_io *io);
void ctl_config_read_done(union ctl_io *io);
void ctl_config_write_done(union ctl_io *io);
void ctl_portDB_changed(int portnum);
-#ifdef notyet
-void ctl_init_isc_msg(void);
-#endif
int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
+struct ctl_lun;
+void ctl_isc_announce_lun(struct ctl_lun *lun);
+struct ctl_port;
+void ctl_isc_announce_port(struct ctl_port *port);
/*
* KPI to manipulate LUN/port options
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index e7d544a..f5bfd83 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -308,6 +308,12 @@ int ctl_lun_offline(struct ctl_be_lun *be_lun);
int ctl_lun_online(struct ctl_be_lun *be_lun);
/*
+ * Called on LUN HA role change.
+ */
+int ctl_lun_primary(struct ctl_be_lun *be_lun);
+int ctl_lun_secondary(struct ctl_be_lun *be_lun);
+
+/*
* Let the backend notify the initiator about changed capacity.
*/
void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun);
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 7fd15b9..cc121ec 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -87,7 +87,9 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
/*
@@ -219,6 +221,8 @@ struct ctl_be_block_io {
void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
};
+extern struct ctl_softc *control_softc;
+
static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
@@ -2228,7 +2232,13 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
else
cbe_lun->lun_type = T_DIRECT;
be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
- cbe_lun->flags = CTL_LUN_FLAG_PRIMARY;
+ cbe_lun->flags = 0;
+ value = ctl_get_opt(&cbe_lun->options, "ha_role");
+ if (value != NULL) {
+ if (strcmp(value, "primary") == 0)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
if (cbe_lun->lun_type == T_DIRECT) {
be_lun->size_bytes = params->lun_size_bytes;
@@ -2240,10 +2250,13 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1);
- retval = ctl_be_block_open(softc, be_lun, req);
- if (retval != 0) {
- retval = 0;
- req->status = CTL_LUN_WARNING;
+ if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
+ control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
+ retval = ctl_be_block_open(softc, be_lun, req);
+ if (retval != 0) {
+ retval = 0;
+ req->status = CTL_LUN_WARNING;
+ }
}
num_threads = cbb_num_threads;
} else {
@@ -2601,8 +2614,9 @@ ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
struct ctl_lun_modify_params *params;
struct ctl_be_block_lun *be_lun;
struct ctl_be_lun *cbe_lun;
+ char *value;
uint64_t oldsize;
- int error;
+ int error, wasprim;
params = &req->reqdata.modify;
@@ -2625,23 +2639,51 @@ ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
be_lun->params.lun_size_bytes = params->lun_size_bytes;
ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
- oldsize = be_lun->size_blocks;
- if (be_lun->vn == NULL)
- error = ctl_be_block_open(softc, be_lun, req);
- else if (vn_isdisk(be_lun->vn, &error))
- error = ctl_be_block_modify_dev(be_lun, req);
- else if (be_lun->vn->v_type == VREG)
- error = ctl_be_block_modify_file(be_lun, req);
+ wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
+ value = ctl_get_opt(&cbe_lun->options, "ha_role");
+ if (value != NULL) {
+ if (strcmp(value, "primary") == 0)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ else
+ cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+ } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
else
- error = EINVAL;
+ cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+ if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
+ if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
+ ctl_lun_primary(cbe_lun);
+ else
+ ctl_lun_secondary(cbe_lun);
+ }
+ oldsize = be_lun->size_blocks;
+ if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
+ control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
+ if (be_lun->vn == NULL)
+ error = ctl_be_block_open(softc, be_lun, req);
+ else if (vn_isdisk(be_lun->vn, &error))
+ error = ctl_be_block_modify_dev(be_lun, req);
+ else if (be_lun->vn->v_type == VREG)
+ error = ctl_be_block_modify_file(be_lun, req);
+ else
+ error = EINVAL;
+ if ((cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) &&
+ be_lun->vn != NULL) {
+ cbe_lun->flags &= ~CTL_LUN_FLAG_OFFLINE;
+ ctl_lun_online(cbe_lun);
+ }
+ } else {
+ if (be_lun->vn != NULL) {
+ cbe_lun->flags |= CTL_LUN_FLAG_OFFLINE;
+ ctl_lun_offline(cbe_lun);
+ pause("CTL LUN offline", hz / 8); // XXX
+ error = ctl_be_block_close(be_lun);
+ } else
+ error = 0;
+ }
if (be_lun->size_blocks != oldsize)
ctl_lun_capacity_changed(cbe_lun);
- if ((cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) &&
- be_lun->vn != NULL) {
- cbe_lun->flags &= ~CTL_LUN_FLAG_OFFLINE;
- ctl_lun_online(cbe_lun);
- }
/* Tell the user the exact size we ended up using */
params->lun_size_bytes = be_lun->size_bytes;
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index c39d1b6..c7e1803 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -56,14 +56,18 @@ __FBSDID("$FreeBSD$");
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
+#include <sys/sysctl.h>
#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
typedef enum {
@@ -101,6 +105,7 @@ struct ctl_be_ramdisk_softc {
};
static struct ctl_be_ramdisk_softc rd_softc;
+extern struct ctl_softc *control_softc;
int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
@@ -546,7 +551,13 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
else
cbe_lun->lun_type = T_DIRECT;
be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
- cbe_lun->flags = CTL_LUN_FLAG_PRIMARY;
+ cbe_lun->flags = 0;
+ value = ctl_get_opt(&cbe_lun->options, "ha_role");
+ if (value != NULL) {
+ if (strcmp(value, "primary") == 0)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
if (cbe_lun->lun_type == T_DIRECT) {
if (params->blocksize_bytes != 0)
@@ -717,7 +728,9 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_be_ramdisk_lun *be_lun;
struct ctl_be_lun *cbe_lun;
struct ctl_lun_modify_params *params;
+ char *value;
uint32_t blocksize;
+ int wasprim;
params = &req->reqdata.modify;
@@ -739,15 +752,32 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
if (params->lun_size_bytes != 0)
be_lun->params.lun_size_bytes = params->lun_size_bytes;
ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
- blocksize = be_lun->cbe_lun.blocksize;
+ wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
+ value = ctl_get_opt(&cbe_lun->options, "ha_role");
+ if (value != NULL) {
+ if (strcmp(value, "primary") == 0)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ else
+ cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+ } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
+ cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ else
+ cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
+ if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
+ if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
+ ctl_lun_primary(cbe_lun);
+ else
+ ctl_lun_secondary(cbe_lun);
+ }
+
+ blocksize = be_lun->cbe_lun.blocksize;
if (be_lun->params.lun_size_bytes < blocksize) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size %ju < blocksize %u", __func__,
be_lun->params.lun_size_bytes, blocksize);
goto bailout_error;
}
-
be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
be_lun->size_bytes = be_lun->size_blocks * blocksize;
be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index 9a7d70e..5b75468 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -69,8 +69,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -81,8 +80,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -93,8 +91,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -105,8 +102,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -123,8 +119,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -135,8 +130,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -147,8 +141,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -159,8 +152,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -171,8 +163,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -183,8 +174,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -195,8 +185,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -339,7 +328,7 @@ const struct ctl_cmd_entry ctl_cmd_table_84[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -436,7 +425,6 @@ const struct ctl_cmd_entry ctl_cmd_table_9e[32] =
{ctl_read_capacity_16, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_READCAP,
@@ -493,8 +481,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_tagret_port_groups, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -507,8 +495,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_supported_opcodes, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -518,8 +506,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_supported_tmf, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -532,8 +520,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_timestamp, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -563,8 +551,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0xff, 0x07}},
@@ -624,8 +612,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0xe1, 0xff, 0xff, 0xff, 0x07}},
@@ -640,8 +628,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 6, {0x11, 0, 0, 0xff, 0x07}},
@@ -650,8 +637,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
@@ -660,8 +646,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_NONE,
CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
@@ -675,8 +660,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE, 6, {0x08, 0xff, 0xff, 0xff, 0x07}},
@@ -685,7 +669,6 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_FLAG_DATA_NONE |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0x03, 0x07}},
@@ -721,7 +704,6 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN|
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_READCAP, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
@@ -812,7 +794,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_write_buffer, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE,
10, {0x1f, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
@@ -821,7 +803,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE,
@@ -911,8 +893,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x11, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
@@ -921,8 +902,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@@ -931,8 +911,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@@ -946,8 +925,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE, 10, {0x18, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@@ -1199,8 +1177,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_OFFLINE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_CMD_FLAG_OK_ON_STANDBY |
+ CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@@ -1315,33 +1293,17 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
/* BF VOLUME SET OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* C0 - ISC_SEND_MSG_SHORT */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE,
- 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+/* C0 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* C1 - ISC_SEND_MSG */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE,
- 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+/* C1 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* C2 - ISC_WRITE */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE,
- 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+/* C2 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* C3 - ISC_READ */
-//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE,
- 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+/* C3 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C4 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
index 8d98204..8ef5176 100644
--- a/sys/cam/ctl/ctl_error.c
+++ b/sys/cam/ctl/ctl_error.c
@@ -723,6 +723,18 @@ ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio)
}
void
+ctl_set_lun_transit(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not ready, asymmetric access state transition" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x04,
+ /*ascq*/ 0x0a,
+ SSD_ELEM_NONE);
+}
+
+void
ctl_set_lun_standby(struct ctl_scsiio *ctsio)
{
/* "Logical unit not ready, target port in standby state" */
@@ -735,6 +747,18 @@ ctl_set_lun_standby(struct ctl_scsiio *ctsio)
}
void
+ctl_set_lun_unavail(struct ctl_scsiio *ctsio)
+{
+ /* "Logical unit not ready, target port in unavailable state" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_NOT_READY,
+ /*asc*/ 0x04,
+ /*ascq*/ 0x0c,
+ SSD_ELEM_NONE);
+}
+
+void
ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio)
{
/* "Medium format corrupted" */
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
index 8430eef..4fa8060 100644
--- a/sys/cam/ctl/ctl_error.h
+++ b/sys/cam/ctl/ctl_error.h
@@ -67,7 +67,9 @@ void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
void ctl_set_already_locked(struct ctl_scsiio *ctsio);
void ctl_set_unsupported_lun(struct ctl_scsiio *ctsio);
+void ctl_set_lun_transit(struct ctl_scsiio *ctsio);
void ctl_set_lun_standby(struct ctl_scsiio *ctsio);
+void ctl_set_lun_unavail(struct ctl_scsiio *ctsio);
void ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
uint16_t retry_count);
void ctl_set_medium_error(struct ctl_scsiio *ctsio);
diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c
index 5554951..c9c75d4 100644
--- a/sys/cam/ctl/ctl_frontend.c
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -140,6 +140,7 @@ int
ctl_port_register(struct ctl_port *port)
{
struct ctl_softc *softc = control_softc;
+ struct ctl_port *tport, *nport;
void *pool;
int port_num;
int retval;
@@ -149,10 +150,13 @@ ctl_port_register(struct ctl_port *port)
KASSERT(softc != NULL, ("CTL is not initialized"));
mtx_lock(&softc->ctl_lock);
- port_num = ctl_ffz(softc->ctl_port_mask, CTL_MAX_PORTS);
- if ((port_num == -1)
- || (ctl_set_mask(softc->ctl_port_mask, port_num) == -1)) {
- port->targ_port = -1;
+ if (port->targ_port >= 0)
+ port_num = port->targ_port;
+ else
+ port_num = ctl_ffz(softc->ctl_port_mask,
+ softc->port_min, softc->port_max);
+ if ((port_num < 0) ||
+ (ctl_set_mask(softc->ctl_port_mask, port_num) < 0)) {
mtx_unlock(&softc->ctl_lock);
return (1);
}
@@ -195,10 +199,17 @@ error:
STAILQ_INIT(&port->options);
mtx_lock(&softc->ctl_lock);
- port->targ_port = port_num + softc->port_offset;
+ port->targ_port = port_num;
STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
- STAILQ_INSERT_TAIL(&softc->port_list, port, links);
- softc->ctl_ports[port_num] = port;
+ for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
+ nport != NULL && nport->targ_port < port_num;
+ tport = nport, nport = STAILQ_NEXT(tport, links)) {
+ }
+ if (tport)
+ STAILQ_INSERT_AFTER(&softc->port_list, tport, port, links);
+ else
+ STAILQ_INSERT_HEAD(&softc->port_list, port, links);
+ softc->ctl_ports[port->targ_port] = port;
mtx_unlock(&softc->ctl_lock);
return (retval);
@@ -209,7 +220,7 @@ ctl_port_deregister(struct ctl_port *port)
{
struct ctl_softc *softc = control_softc;
struct ctl_io_pool *pool;
- int port_num, retval, i;
+ int retval, i;
retval = 0;
@@ -224,10 +235,8 @@ ctl_port_deregister(struct ctl_port *port)
STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links);
softc->num_ports--;
- port_num = (port->targ_port < CTL_MAX_PORTS) ? port->targ_port :
- port->targ_port - CTL_MAX_PORTS;
- ctl_clear_mask(softc->ctl_port_mask, port_num);
- softc->ctl_ports[port_num] = NULL;
+ ctl_clear_mask(softc->ctl_port_mask, port->targ_port);
+ softc->ctl_ports[port->targ_port] = NULL;
mtx_unlock(&softc->ctl_lock);
ctl_pool_free(pool);
@@ -321,6 +330,7 @@ ctl_port_online(struct ctl_port *port)
port->port_online(port->onoff_arg);
/* XXX KDM need a lock here? */
port->status |= CTL_PORT_STATUS_ONLINE;
+ ctl_isc_announce_port(port);
}
void
@@ -347,6 +357,7 @@ ctl_port_offline(struct ctl_port *port)
}
/* XXX KDM need a lock here? */
port->status &= ~CTL_PORT_STATUS_ONLINE;
+ ctl_isc_announce_port(port);
}
/*
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 5ab03c9..01807de 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -157,6 +157,7 @@ cfcs_init(void)
/* XXX These should probably be fetched from CTL. */
port->max_targets = 1;
port->max_target_id = 15;
+ port->targ_port = -1;
retval = ctl_port_register(port);
if (retval != 0) {
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 51e8508..67156bc 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -93,6 +93,7 @@ cfi_init(void)
port->fe_done = cfi_done;
port->max_targets = 1;
port->max_target_id = 0;
+ port->targ_port = -1;
port->max_initiators = 1;
if (ctl_port_register(port) != 0) {
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index 231c1c0..be7e917 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -2033,6 +2033,7 @@ cfiscsi_ioctl_port_create(struct ctl_req *req)
/* XXX These should probably be fetched from CTL. */
port->max_targets = 1;
port->max_target_id = 15;
+ port->targ_port = -1;
port->options = opts;
STAILQ_INIT(&opts);
diff --git a/sys/cam/ctl/ctl_ha.c b/sys/cam/ctl/ctl_ha.c
new file mode 100644
index 0000000..8d6604f
--- /dev/null
+++ b/sys/cam/ctl/ctl_ha.c
@@ -0,0 +1,958 @@
+/*-
+ * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/types.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/uio.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <vm/uma.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_error.h>
+
+#if (__FreeBSD_version < 1100000)
+struct mbufq {
+ struct mbuf *head;
+ struct mbuf *tail;
+};
+
+static void
+mbufq_init(struct mbufq *q, int limit) /* pre-11 compat shim; limit is ignored */
+{
+
+ q->head = q->tail = NULL;
+}
+
+static void
+mbufq_drain(struct mbufq *q) /* free every queued packet and empty the queue */
+{
+ struct mbuf *m;
+
+ while ((m = q->head) != NULL) {
+ q->head = m->m_nextpkt;
+ m_freem(m);
+ }
+ q->tail = NULL;
+}
+
+static struct mbuf *
+mbufq_dequeue(struct mbufq *q) /* pop head packet; NULL when queue is empty */
+{
+ struct mbuf *m;
+
+ m = q->head;
+ if (m) {
+ if (q->tail == m)
+ q->tail = NULL;
+ q->head = m->m_nextpkt;
+ m->m_nextpkt = NULL; /* detach from the packet list */
+ }
+ return (m);
+}
+
+static void
+mbufq_enqueue(struct mbufq *q, struct mbuf *m) /* append one packet at the tail */
+{
+
+ m->m_nextpkt = NULL;
+ if (q->tail)
+ q->tail->m_nextpkt = m;
+ else
+ q->head = m;
+ q->tail = m;
+}
+
+static u_int
+sbavail(struct sockbuf *sb) /* pre-11 compat: bytes buffered in the sockbuf */
+{
+ return (sb->sb_cc);
+}
+
+#if (__FreeBSD_version < 1000000)
+#define mtodo(m, o) ((void *)(((m)->m_data) + (o)))
+#endif
+#endif
+
+struct ha_msg_wire { /* fixed header preceding every interlink message */
+ uint32_t channel; /* destination ctl_ha_channel */
+ uint32_t length; /* payload bytes following this header */
+};
+
+struct ha_dt_msg_wire { /* wire form of a DMA (data transfer) request */
+ ctl_ha_dt_cmd command; /* CTL_HA_DT_CMD_READ or CTL_HA_DT_CMD_WRITE */
+ uint32_t size; /* transfer size in bytes */
+ uint8_t *local; /* sender-side buffer address (opaque to peer) */
+ uint8_t *remote; /* peer-side buffer address */
+};
+
+struct ha_softc { /* single global state of the HA interlink */
+ struct ctl_softc *ha_ctl_softc; /* owning CTL instance */
+ ctl_evt_handler ha_handler[CTL_HA_CHAN_MAX]; /* per-channel callbacks */
+ char ha_peer[128]; /* sysctl string: "connect a.b.c.d:p" or "listen ..." */
+ struct sockaddr_in ha_peer_in; /* parsed peer/listen address */
+ struct socket *ha_lso; /* listening socket, if any */
+ struct socket *ha_so; /* connected interlink socket */
+ struct mbufq ha_sendq; /* queued outgoing messages */
+ struct mbuf *ha_sending; /* message currently being pushed out */
+ struct mtx ha_lock; /* protects sendq, dts list and flags below */
+ int ha_connect; /* configured as connecting side */
+ int ha_listen; /* configured as listening side */
+ int ha_connected; /* link is established */
+ int ha_receiving; /* rx thread is alive */
+ int ha_wakeup; /* wakeup requested for the conn thread */
+ int ha_disconnect; /* ask conn thread to drop the link */
+ TAILQ_HEAD(, ctl_ha_dt_req) ha_dts; /* outstanding DMA read requests */
+} ha_softc;
+
+extern struct ctl_softc *control_softc;
+
+static void
+ctl_ha_conn_wake(struct ha_softc *softc) /* nudge the connection thread */
+{
+
+ mtx_lock(&softc->ha_lock);
+ softc->ha_wakeup = 1; /* flag survives even if thread is not asleep yet */
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+}
+
+static int
+ctl_ha_lupcall(struct socket *so, void *arg, int waitflag) /* listen-socket upcall */
+{
+ struct ha_softc *softc = arg;
+
+ ctl_ha_conn_wake(softc); /* new connection pending: let conn thread accept */
+ return (SU_OK);
+}
+
+static int
+ctl_ha_rupcall(struct socket *so, void *arg, int waitflag) /* receive upcall */
+{
+ struct ha_softc *softc = arg;
+
+ wakeup(&softc->ha_receiving); /* data arrived: wake the rx thread */
+ return (SU_OK);
+}
+
+static int
+ctl_ha_supcall(struct socket *so, void *arg, int waitflag) /* send-space upcall */
+{
+ struct ha_softc *softc = arg;
+
+ ctl_ha_conn_wake(softc); /* room in so_snd: resume ctl_ha_send() */
+ return (SU_OK);
+}
+
+static void
+ctl_ha_evt(struct ha_softc *softc, ctl_ha_channel ch, ctl_ha_event evt,
+ int param) /* deliver event to one channel, or all if ch == CTL_HA_CHAN_MAX */
+{
+ int i;
+
+ if (ch < CTL_HA_CHAN_MAX) {
+ if (softc->ha_handler[ch])
+ softc->ha_handler[ch](ch, evt, param);
+ return;
+ }
+ for (i = 0; i < CTL_HA_CHAN_MAX; i++) { /* broadcast to every registered handler */
+ if (softc->ha_handler[i])
+ softc->ha_handler[i](i, evt, param);
+ }
+}
+
+static void
+ctl_ha_close(struct ha_softc *softc) /* tear down the data socket and link state */
+{
+ struct socket *so = softc->ha_so;
+ int report = 0;
+
+ if (softc->ha_connected || softc->ha_disconnect) {
+ softc->ha_connected = 0;
+ mbufq_drain(&softc->ha_sendq); /* drop all unsent messages */
+ m_freem(softc->ha_sending);
+ softc->ha_sending = NULL;
+ report = 1; /* remember to announce the link change below */
+ }
+ if (so) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ soupcall_clear(so, SO_RCV);
+ while (softc->ha_receiving) { /* wait for the rx thread to exit */
+ wakeup(&softc->ha_receiving);
+ msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+ 0, "ha_rx exit", 0);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_LOCK(&so->so_snd);
+ soupcall_clear(so, SO_SND);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ softc->ha_so = NULL;
+ if (softc->ha_connect)
+ pause("reconnect", hz / 2); /* rate-limit reconnect attempts */
+ soclose(so);
+ }
+ if (report) { /* tell all channels the link went down */
+ ctl_ha_evt(softc, CTL_HA_CHAN_MAX, CTL_HA_EVT_LINK_CHANGE,
+ (softc->ha_connect || softc->ha_listen) ?
+ CTL_HA_LINK_UNKNOWN : CTL_HA_LINK_OFFLINE);
+ }
+}
+
+static void
+ctl_ha_lclose(struct ha_softc *softc) /* close the listening socket, if any */
+{
+
+ if (softc->ha_lso) {
+ SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+ soupcall_clear(softc->ha_lso, SO_RCV); /* stop accept notifications */
+ SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+ soclose(softc->ha_lso);
+ softc->ha_lso = NULL;
+ }
+}
+
+static void
+ctl_ha_rx_thread(void *arg) /* kthread: read wire headers, dispatch payloads */
+{
+ struct ha_softc *softc = arg;
+ struct socket *so = softc->ha_so;
+ struct ha_msg_wire wire_hdr;
+ struct uio uio;
+ struct iovec iov;
+ int error, flags, next;
+
+ bzero(&wire_hdr, sizeof(wire_hdr));
+ while (1) {
+ if (wire_hdr.length > 0) /* payload of the last header still pending */
+ next = wire_hdr.length;
+ else
+ next = sizeof(wire_hdr);
+ SOCKBUF_LOCK(&so->so_rcv);
+ while (sbavail(&so->so_rcv) < next) { /* sleep until 'next' bytes buffered */
+ if (softc->ha_connected == 0 || so->so_error ||
+ (so->so_rcv.sb_state & SBS_CANTRCVMORE)) {
+ goto errout; /* link dropped; exits with so_rcv locked */
+ }
+ so->so_rcv.sb_lowat = next; /* upcall fires once enough data arrives */
+ msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+ 0, "-", 0);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (wire_hdr.length == 0) { /* read the next fixed-size header */
+ iov.iov_base = &wire_hdr;
+ iov.iov_len = sizeof(wire_hdr);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_rw = UIO_READ;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_td = curthread;
+ uio.uio_resid = sizeof(wire_hdr);
+ flags = MSG_DONTWAIT; /* we already know the data is buffered */
+ error = soreceive(softc->ha_so, NULL, &uio, NULL,
+ NULL, &flags);
+ if (error != 0) {
+ printf("%s: header receive error %d\n",
+ __func__, error);
+ SOCKBUF_LOCK(&so->so_rcv);
+ goto errout;
+ }
+ } else { /* hand the payload to the channel handler via ctl_ha_msg_recv() */
+ ctl_ha_evt(softc, wire_hdr.channel,
+ CTL_HA_EVT_MSG_RECV, wire_hdr.length);
+ wire_hdr.length = 0;
+ }
+ }
+
+errout:
+ softc->ha_receiving = 0;
+ wakeup(&softc->ha_receiving); /* unblock a ctl_ha_close() waiter */
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ ctl_ha_conn_wake(softc); /* let the conn thread notice and clean up */
+ kthread_exit();
+}
+
+static void
+ctl_ha_send(struct ha_softc *softc) /* push queued messages to the socket */
+{
+ struct socket *so = softc->ha_so;
+ int error;
+
+ while (1) {
+ if (softc->ha_sending == NULL) {
+ mtx_lock(&softc->ha_lock);
+ softc->ha_sending = mbufq_dequeue(&softc->ha_sendq);
+ mtx_unlock(&softc->ha_lock);
+ if (softc->ha_sending == NULL) {
+ so->so_snd.sb_lowat = so->so_snd.sb_hiwat + 1; /* queue empty: inhibit send upcalls */
+ break;
+ }
+ }
+ SOCKBUF_LOCK(&so->so_snd);
+ if (sbspace(&so->so_snd) < softc->ha_sending->m_pkthdr.len) {
+ so->so_snd.sb_lowat = softc->ha_sending->m_pkthdr.len; /* upcall will resume us */
+ SOCKBUF_UNLOCK(&so->so_snd);
+ break; /* keep ha_sending for the retry */
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = sosend(softc->ha_so, NULL, NULL, softc->ha_sending,
+ NULL, MSG_DONTWAIT, curthread);
+ softc->ha_sending = NULL; /* sosend() consumed the mbuf chain */
+ if (error != 0) {
+ printf("%s: sosend() error %d\n", __func__, error);
+ return;
+ }
+ };
+}
+
+static void
+ctl_ha_sock_setup(struct ha_softc *softc) /* buffers, upcalls and TCP options */
+{
+ struct sockopt opt;
+ struct socket *so = softc->ha_so;
+ int error, val;
+
+ val = 1024 * 1024; /* 1 MB socket buffers in each direction */
+ error = soreserve(so, val, val);
+ if (error)
+ printf("%s: soreserve failed %d\n", __func__, error);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_lowat = sizeof(struct ha_msg_wire);
+ soupcall_set(so, SO_RCV, ctl_ha_rupcall, softc);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_lowat = sizeof(struct ha_msg_wire);
+ soupcall_set(so, SO_SND, ctl_ha_supcall, softc);
+ SOCKBUF_UNLOCK(&so->so_snd);
+
+ bzero(&opt, sizeof(struct sockopt)); /* opt struct reused for every option below */
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = SOL_SOCKET;
+ opt.sopt_name = SO_KEEPALIVE;
+ opt.sopt_val = &val;
+ opt.sopt_valsize = sizeof(val);
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPALIVE setting failed %d\n", __func__, error);
+
+ opt.sopt_level = IPPROTO_TCP;
+ opt.sopt_name = TCP_NODELAY; /* low-latency interlink: disable Nagle */
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: NODELAY setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPINIT; /* aggressive keepalive for fast failover */
+ val = 3;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPINIT setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPIDLE;
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPIDLE setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPINTVL;
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPINTVL setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPCNT; /* declare the peer dead after 5 missed probes */
+ val = 5;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPCNT setting failed %d\n", __func__, error);
+}
+
+static int
+ctl_ha_connect(struct ha_softc *softc) /* initiate TCP connection to the peer */
+{
+ struct thread *td = curthread;
+ struct socket *so;
+ int error;
+
+ /* Create the socket */
+ error = socreate(PF_INET, &so, SOCK_STREAM,
+ IPPROTO_TCP, td->td_ucred, td);
+ if (error != 0) {
+ printf("%s: socreate() error %d\n", __func__, error);
+ return (error);
+ }
+ softc->ha_so = so;
+ ctl_ha_sock_setup(softc);
+
+ error = soconnect(so, (struct sockaddr *)&softc->ha_peer_in, td); /* non-blocking; conn thread polls completion */
+ if (error != 0) {
+ printf("%s: soconnect() error %d\n", __func__, error);
+ goto out;
+ }
+ return (0);
+
+out:
+ ctl_ha_close(softc);
+ return (error);
+}
+
+static int
+ctl_ha_accept(struct ha_softc *softc) /* open-coded non-blocking accept; NOTE(review): mirrors kern accept internals -- fragile across releases */
+{
+ struct socket *so;
+ struct sockaddr *sap;
+ int error;
+
+ ACCEPT_LOCK();
+ if (softc->ha_lso->so_rcv.sb_state & SBS_CANTRCVMORE)
+ softc->ha_lso->so_error = ECONNABORTED;
+ if (softc->ha_lso->so_error) {
+ error = softc->ha_lso->so_error;
+ softc->ha_lso->so_error = 0;
+ ACCEPT_UNLOCK();
+ printf("%s: socket error %d\n", __func__, error);
+ goto out;
+ }
+ so = TAILQ_FIRST(&softc->ha_lso->so_comp); /* first fully-established connection */
+ if (so == NULL) {
+ ACCEPT_UNLOCK();
+ return (EWOULDBLOCK); /* nothing pending; caller retries later */
+ }
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
+ KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+ soref(so); /* file descriptor reference */
+
+ TAILQ_REMOVE(&softc->ha_lso->so_comp, so, so_list);
+ softc->ha_lso->so_qlen--;
+ so->so_state |= SS_NBIO; /* keep the new socket non-blocking */
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ sap = NULL;
+ error = soaccept(so, &sap);
+ if (error != 0) {
+ printf("%s: soaccept() error %d\n", __func__, error);
+ if (sap != NULL)
+ free(sap, M_SONAME);
+ goto out;
+ }
+ if (sap != NULL)
+ free(sap, M_SONAME); /* peer address is not needed */
+ softc->ha_so = so;
+ ctl_ha_sock_setup(softc);
+ return (0);
+
+out:
+ ctl_ha_lclose(softc);
+ return (error);
+}
+
+static int
+ctl_ha_listen(struct ha_softc *softc) /* create/bind/listen the passive socket */
+{
+ struct thread *td = curthread;
+ struct sockopt opt;
+ int error, val;
+
+ /* Create the socket */
+ if (softc->ha_lso == NULL) {
+ error = socreate(PF_INET, &softc->ha_lso, SOCK_STREAM,
+ IPPROTO_TCP, td->td_ucred, td);
+ if (error != 0) {
+ printf("%s: socreate() error %d\n", __func__, error);
+ return (error);
+ }
+ bzero(&opt, sizeof(struct sockopt));
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = SOL_SOCKET;
+ opt.sopt_name = SO_REUSEADDR; /* allow quick rebind after restart */
+ opt.sopt_val = &val;
+ opt.sopt_valsize = sizeof(val);
+ val = 1;
+ error = sosetopt(softc->ha_lso, &opt);
+ if (error) {
+ printf("%s: REUSEADDR setting failed %d\n",
+ __func__, error);
+ }
+ SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+ soupcall_set(softc->ha_lso, SO_RCV, ctl_ha_lupcall, softc);
+ SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+ }
+
+ error = sobind(softc->ha_lso, (struct sockaddr *)&softc->ha_peer_in, td);
+ if (error != 0) {
+ printf("%s: sobind() error %d\n", __func__, error);
+ goto out;
+ }
+ error = solisten(softc->ha_lso, 1, td); /* backlog 1: single-peer interlink */
+ if (error != 0) {
+ printf("%s: solisten() error %d\n", __func__, error);
+ goto out;
+ }
+ return (0);
+
+out:
+ ctl_ha_lclose(softc);
+ return (error);
+}
+
<br/>
+static void
+ctl_ha_conn_thread(void *arg) /* kthread: drive connect/accept/send state machine */
+{
+ struct ha_softc *softc = arg;
+ int error;
+
+ while (1) {
+ if (softc->ha_disconnect) { /* explicit teardown request */
+ ctl_ha_close(softc);
+ ctl_ha_lclose(softc);
+ softc->ha_disconnect = 0;
+ } else if (softc->ha_so != NULL &&
+ (softc->ha_so->so_error ||
+ softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+ ctl_ha_close(softc); /* socket died on its own */
+ if (softc->ha_so == NULL) { /* (re)establish the link */
+ if (softc->ha_lso != NULL)
+ ctl_ha_accept(softc);
+ else if (softc->ha_listen)
+ ctl_ha_listen(softc);
+ else if (softc->ha_connect)
+ ctl_ha_connect(softc);
+ }
+ if (softc->ha_so != NULL) {
+ if (softc->ha_connected == 0 &&
+ softc->ha_so->so_error == 0 &&
+ (softc->ha_so->so_state & SS_ISCONNECTING) == 0) { /* connect completed */
+ softc->ha_connected = 1;
+ ctl_ha_evt(softc, CTL_HA_CHAN_MAX,
+ CTL_HA_EVT_LINK_CHANGE,
+ CTL_HA_LINK_ONLINE);
+ softc->ha_receiving = 1;
+ error = kproc_kthread_add(ctl_ha_rx_thread,
+ softc, &softc->ha_ctl_softc->ctl_proc,
+ NULL, 0, 0, "ctl", "ha_rx");
+ if (error != 0) {
+ printf("Error creating CTL HA rx thread!\n");
+ softc->ha_receiving = 0;
+ softc->ha_disconnect = 1; /* no rx thread: drop the link */
+ }
+ }
+ ctl_ha_send(softc);
+ }
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_so != NULL &&
+ (softc->ha_so->so_error ||
+ softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+ ; /* socket failed: loop again immediately */
+ else if (!softc->ha_wakeup)
+ msleep(&softc->ha_wakeup, &softc->ha_lock, 0, "-", hz); /* 1s poll as backstop */
+ softc->ha_wakeup = 0;
+ mtx_unlock(&softc->ha_lock);
+ }
+}
+
+static int
+ctl_ha_peer_sysctl(SYSCTL_HANDLER_ARGS) /* "ha_peer" sysctl: parse "connect"/"listen" spec */
+{
+ struct ha_softc *softc = (struct ha_softc *)arg1;
+ struct sockaddr_in *sa;
+ int error, b1, b2, b3, b4, p, num;
+
+ error = sysctl_handle_string(oidp, softc->ha_peer,
+ sizeof(softc->ha_peer), req);
+ if ((error != 0) || (req->newptr == NULL))
+ return (error); /* read-only access or copy error */
+
+ sa = &softc->ha_peer_in;
+ mtx_lock(&softc->ha_lock);
+ if ((num = sscanf(softc->ha_peer, "connect %d.%d.%d.%d:%d",
+ &b1, &b2, &b3, &b4, &p)) >= 4) { /* NOTE(review): octets not range-checked */
+ softc->ha_connect = 1;
+ softc->ha_listen = 0;
+ } else if ((num = sscanf(softc->ha_peer, "listen %d.%d.%d.%d:%d",
+ &b1, &b2, &b3, &b4, &p)) >= 4) {
+ softc->ha_connect = 0;
+ softc->ha_listen = 1;
+ } else {
+ softc->ha_connect = 0;
+ softc->ha_listen = 0;
+ if (softc->ha_peer[0] != 0)
+ error = EINVAL; /* non-empty string that matched neither form */
+ }
+ if (softc->ha_connect || softc->ha_listen) {
+ memset(sa, 0, sizeof(*sa));
+ sa->sin_len = sizeof(struct sockaddr_in);
+ sa->sin_family = AF_INET;
+ sa->sin_port = htons((num >= 5) ? p : 999); /* port 999 when none specified */
+ sa->sin_addr.s_addr =
+ htonl((b1 << 24) + (b2 << 16) + (b3 << 8) + b4);
+ }
+ softc->ha_disconnect = 1; /* force reconnect with the new settings */
+ softc->ha_wakeup = 1;
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+ return (error);
+}
+
+ctl_ha_status
+ctl_ha_msg_register(ctl_ha_channel channel, ctl_evt_handler handler) /* install per-channel event callback */
+{
+ struct ha_softc *softc = &ha_softc;
+
+ KASSERT(channel < CTL_HA_CHAN_MAX,
+ ("Wrong CTL HA channel %d", channel));
+ softc->ha_handler[channel] = handler;
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_deregister(ctl_ha_channel channel) /* remove per-channel event callback */
+{
+ struct ha_softc *softc = &ha_softc;
+
+ KASSERT(channel < CTL_HA_CHAN_MAX,
+ ("Wrong CTL HA channel %d", channel));
+ softc->ha_handler[channel] = NULL;
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Receive a message of the specified size.
+ */
+ctl_ha_status
+ctl_ha_msg_recv(ctl_ha_channel channel, void *addr, size_t len,
+ int wait) /* called from the channel handler during CTL_HA_EVT_MSG_RECV */
+{
+ struct ha_softc *softc = &ha_softc;
+ struct uio uio;
+ struct iovec iov;
+ int error, flags;
+
+ if (!softc->ha_connected)
+ return (CTL_HA_STATUS_DISCONNECT);
+
+ iov.iov_base = addr;
+ iov.iov_len = len;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_rw = UIO_READ;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_td = curthread;
+ uio.uio_resid = len;
+ flags = wait ? 0 : MSG_DONTWAIT;
+ error = soreceive(softc->ha_so, NULL, &uio, NULL, NULL, &flags);
+ if (error == 0)
+ return (CTL_HA_STATUS_SUCCESS);
+
+ /* Consider all errors fatal for HA sanity. */
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_connected) {
+ softc->ha_disconnect = 1; /* ask conn thread to tear the link down */
+ softc->ha_wakeup = 1;
+ wakeup(&softc->ha_wakeup);
+ }
+ mtx_unlock(&softc->ha_lock);
+ return (CTL_HA_STATUS_ERROR);
+}
+
+/*
+ * Send a message of the specified size.
+ */
+ctl_ha_status
+ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr, size_t len,
+ const void *addr2, size_t len2, int wait) /* two scatter pieces sent as one wire message */
+{
+ struct ha_softc *softc = &ha_softc;
+ struct mbuf *mb, *newmb;
+ struct ha_msg_wire hdr;
+ size_t copylen, off;
+
+ if (!softc->ha_connected)
+ return (CTL_HA_STATUS_DISCONNECT);
+
+ newmb = m_getm2(NULL, sizeof(hdr) + len + len2, wait, MT_DATA,
+ M_PKTHDR); /* chain large enough for header + both pieces */
+ if (newmb == NULL) {
+ /* Consider all errors fatal for HA sanity. */
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_connected) {
+ softc->ha_disconnect = 1;
+ softc->ha_wakeup = 1;
+ wakeup(&softc->ha_wakeup);
+ }
+ mtx_unlock(&softc->ha_lock);
+ printf("%s: Can't allocate mbuf chain\n", __func__);
+ return (CTL_HA_STATUS_ERROR);
+ }
+ hdr.channel = channel;
+ hdr.length = len + len2;
+ mb = newmb;
+ memcpy(mtodo(mb, 0), &hdr, sizeof(hdr));
+ mb->m_len += sizeof(hdr);
+ off = 0;
+ for (; mb != NULL && off < len; mb = mb->m_next) { /* copy first piece */
+ copylen = min(M_TRAILINGSPACE(mb), len - off);
+ memcpy(mtodo(mb, mb->m_len), (const char *)addr + off, copylen);
+ mb->m_len += copylen;
+ off += copylen;
+ if (off == len)
+ break; /* keep mb: second piece continues in this mbuf */
+ }
+ KASSERT(off == len, ("%s: off (%zu) != len (%zu)", __func__,
+ off, len));
+ off = 0;
+ for (; mb != NULL && off < len2; mb = mb->m_next) { /* copy second piece */
+ copylen = min(M_TRAILINGSPACE(mb), len2 - off);
+ memcpy(mtodo(mb, mb->m_len), (const char *)addr2 + off, copylen);
+ mb->m_len += copylen;
+ off += copylen;
+ }
+ KASSERT(off == len2, ("%s: off (%zu) != len2 (%zu)", __func__,
+ off, len2));
+ newmb->m_pkthdr.len = sizeof(hdr) + len + len2;
+
+ mtx_lock(&softc->ha_lock);
+ if (!softc->ha_connected) { /* link dropped while we were building */
+ mtx_unlock(&softc->ha_lock);
+ m_freem(newmb);
+ return (CTL_HA_STATUS_DISCONNECT);
+ }
+ mbufq_enqueue(&softc->ha_sendq, newmb); /* conn thread performs the send */
+ softc->ha_wakeup = 1;
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_send(ctl_ha_channel channel, const void *addr, size_t len,
+ int wait) /* single-buffer convenience wrapper around ctl_ha_msg_send2() */
+{
+
+ return (ctl_ha_msg_send2(channel, addr, len, NULL, 0, wait));
+}
+
+/*
+ * Allocate a data transfer request structure.
+ */
+struct ctl_ha_dt_req *
+ctl_dt_req_alloc(void) /* zeroed, M_WAITOK: never returns NULL */
+{
+
+ return (malloc(sizeof(struct ctl_ha_dt_req), M_CTL, M_WAITOK | M_ZERO));
+}
+
+/*
+ * Free a data transfer request structure.
+ */
+void
+ctl_dt_req_free(struct ctl_ha_dt_req *req) /* counterpart of ctl_dt_req_alloc() */
+{
+
+ free(req, M_CTL);
+}
+
+/*
+ * Issue a DMA request for a single buffer.
+ */
+ctl_ha_status
+ctl_dt_single(struct ctl_ha_dt_req *req) /* translate req into wire messages */
+{
+ struct ha_softc *softc = &ha_softc;
+ struct ha_dt_msg_wire wire_dt;
+ ctl_ha_status status;
+
+ wire_dt.command = req->command;
+ wire_dt.size = req->size;
+ wire_dt.local = req->local;
+ wire_dt.remote = req->remote;
+ if (req->command == CTL_HA_DT_CMD_READ && req->callback != NULL) {
+ mtx_lock(&softc->ha_lock);
+ TAILQ_INSERT_TAIL(&softc->ha_dts, req, links); /* completed when peer writes back */
+ mtx_unlock(&softc->ha_lock);
+ ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt, sizeof(wire_dt),
+ M_WAITOK);
+ return (CTL_HA_STATUS_WAIT); /* callback will report the result */
+ }
+ if (req->command == CTL_HA_DT_CMD_READ) {
+ status = ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), M_WAITOK);
+ } else {
+ status = ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), req->local, req->size, M_WAITOK); /* WRITE: data follows the header */
+ }
+ return (status);
+}
+
+static void
+ctl_dt_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) /* CTL_HA_CHAN_DATA events */
+{
+ struct ha_softc *softc = &ha_softc;
+ struct ctl_ha_dt_req *req;
+ ctl_ha_status isc_status;
+
+ if (event == CTL_HA_EVT_MSG_RECV) {
+ struct ha_dt_msg_wire wire_dt;
+ uint8_t *tmp;
+ int size;
+
+ size = min(sizeof(wire_dt), param);
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA, &wire_dt,
+ size, M_WAITOK);
+ if (isc_status != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: Error receiving message: %d\n",
+ __func__, isc_status);
+ return;
+ }
+
+ if (wire_dt.command == CTL_HA_DT_CMD_READ) { /* peer wants our data: answer with a WRITE */
+ wire_dt.command = CTL_HA_DT_CMD_WRITE;
+ tmp = wire_dt.local;
+ wire_dt.local = wire_dt.remote; /* swap roles for the reply */
+ wire_dt.remote = tmp;
+ ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), wire_dt.local, wire_dt.size,
+ M_WAITOK);
+ } else if (wire_dt.command == CTL_HA_DT_CMD_WRITE) { /* incoming data for a pending READ */
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA,
+ wire_dt.remote, wire_dt.size, M_WAITOK);
+ mtx_lock(&softc->ha_lock);
+ TAILQ_FOREACH(req, &softc->ha_dts, links) { /* match by buffer address */
+ if (req->local == wire_dt.remote) {
+ TAILQ_REMOVE(&softc->ha_dts, req, links);
+ break;
+ }
+ }
+ mtx_unlock(&softc->ha_lock);
+ if (req) { /* NULL when the list was exhausted without a match */
+ req->ret = isc_status;
+ req->callback(req);
+ }
+ }
+ } else if (event == CTL_HA_EVT_LINK_CHANGE) {
+ CTL_DEBUG_PRINT(("%s: Link state change to %d\n", __func__,
+ param));
+ if (param != CTL_HA_LINK_ONLINE) { /* fail all outstanding transfers */
+ mtx_lock(&softc->ha_lock);
+ while ((req = TAILQ_FIRST(&softc->ha_dts)) != NULL) {
+ TAILQ_REMOVE(&softc->ha_dts, req, links);
+ mtx_unlock(&softc->ha_lock); /* drop lock around the callback */
+ req->ret = CTL_HA_STATUS_DISCONNECT;
+ req->callback(req);
+ mtx_lock(&softc->ha_lock);
+ }
+ mtx_unlock(&softc->ha_lock);
+ }
+ } else {
+ printf("%s: Unknown event %d\n", __func__, event);
+ }
+}
+
+
+ctl_ha_status
+ctl_ha_msg_init(struct ctl_softc *ctl_softc) /* module init: state, conn thread, sysctl */
+{
+ struct ha_softc *softc = &ha_softc;
+ int error;
+
+ softc->ha_ctl_softc = ctl_softc;
+ mtx_init(&softc->ha_lock, "CTL HA mutex", NULL, MTX_DEF);
+ mbufq_init(&softc->ha_sendq, INT_MAX);
+ TAILQ_INIT(&softc->ha_dts);
+ error = kproc_kthread_add(ctl_ha_conn_thread, softc,
+ &ctl_softc->ctl_proc, NULL, 0, 0, "ctl", "ha_tx");
+ if (error != 0) {
+ printf("error creating CTL HA connection thread!\n");
+ mtx_destroy(&softc->ha_lock);
+ return (CTL_HA_STATUS_ERROR);
+ }
+ SYSCTL_ADD_PROC(&ctl_softc->sysctl_ctx,
+ SYSCTL_CHILDREN(ctl_softc->sysctl_tree),
+ OID_AUTO, "ha_peer", CTLTYPE_STRING | CTLFLAG_RWTUN,
+ softc, 0, ctl_ha_peer_sysctl, "A", "HA peer connection method");
+
+ if (ctl_ha_msg_register(CTL_HA_CHAN_DATA, ctl_dt_event_handler)
+ != CTL_HA_STATUS_SUCCESS) { /* cannot fail today, but stay defensive */
+ printf("%s: ctl_ha_msg_register failed.\n", __func__);
+ }
+
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc) /* module teardown: undo msg_init */
+{
+ struct ha_softc *softc = &ha_softc;
+
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_DATA) != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
+ }
+
+ mtx_destroy(&softc->ha_lock);
+ return (CTL_HA_STATUS_SUCCESS);
+}
diff --git a/sys/cam/ctl/ctl_ha.h b/sys/cam/ctl/ctl_ha.h
index 0c004b3..072492a 100644
--- a/sys/cam/ctl/ctl_ha.h
+++ b/sys/cam/ctl/ctl_ha.h
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,80 +39,27 @@
/*
* CTL High Availability Modes:
*
- * CTL_HA_MODE_ACT_STBY: One side is in Active state and processing commands,
- * the other side is in Standby state, returning errors.
- * CTL_HA_MODE_SER_ONLY: Commands are serialized to the other side. Write
- * mirroring and read re-direction are assumed to
- * happen in the back end.
- * CTL_HA_MODE_XFER: Commands are serialized and data is transferred
- * for write mirroring and read re-direction.
+ * CTL_HA_MODE_ACT_STBY: Commands are serialized to the master side.
+ * No media access commands on slave side (Standby).
+ * CTL_HA_MODE_SER_ONLY: Commands are serialized to the master side.
+ * Media can be accessed on both sides.
+ * CTL_HA_MODE_XFER: Commands and data are forwarded to the
+ * master side for execution.
*/
-
typedef enum {
CTL_HA_MODE_ACT_STBY,
CTL_HA_MODE_SER_ONLY,
CTL_HA_MODE_XFER
} ctl_ha_mode;
-
-/*
- * This is a stubbed out High Availability interface. It assumes two nodes
- * staying in sync.
- *
- * The reason this interface is here, and stubbed out, is that CTL was
- * originally written with support for Copan's (now SGI) high availability
- * framework. That framework was not released by SGI, and would not have
- * been generally applicable to FreeBSD anyway.
- *
- * The idea here is to show the kind of API that would need to be in place
- * in a HA framework to work with CTL's HA hooks. This API is very close
- * to the Copan/SGI API, so that the code using it could stay in place
- * as-is.
- *
- * So, in summary, this is a shell without real substance, and much more
- * work would be needed to actually make HA work. The implementation
- * inside CTL will also need to change to fit the eventual implementation.
- * The additional pieces we would need are:
- *
- * - HA "Supervisor" framework that can startup the components of the
- * system, and initiate failover (i.e. active/active to single mode)
- * and failback (single to active/active mode) state transitions.
- * This framework would be able to recognize when an event happens
- * that requires it to initiate state transitions in the components it
- * manages.
- *
- * - HA communication framework. This framework should have the following
- * features:
- * - Separate channels for separate system components. The CTL
- * instance on one node should communicate with the CTL instance
- * on another node.
- * - Short message passing. These messages would be fixed length, so
- * they could be preallocated and easily passed between the nodes.
- * i.e. conceptually like an ethernet packet.
- * - DMA/large buffer capability. This would require some negotiation
- * with the other node to define the destination. It could
- * allow for "push" (i.e. initiated by the requesting node) DMA or
- * "pull" (i.e. initiated by the target controller) DMA or both.
- * - Communication channel status change notification.
- * - HA capability in other portions of the storage stack. Having two CTL
- * instances communicate is just one part of an overall HA solution.
- * State needs to be synchronized at multiple levels of the system in
- * order for failover to actually work. For instance, if CTL is using a
- * file on a ZFS filesystem as its backing store, the ZFS array state
- * should be synchronized with the other node, so that the other node
- * can immediately take over if the node that is primary for a particular
- * array fails.
- */
-
/*
* Communication channel IDs for various system components. This is to
* make sure one CTL instance talks with another, one ZFS instance talks
* with another, etc.
*/
typedef enum {
- CTL_HA_CHAN_NONE,
CTL_HA_CHAN_CTL,
- CTL_HA_CHAN_ZFS,
+ CTL_HA_CHAN_DATA,
CTL_HA_CHAN_MAX
} ctl_ha_channel;
@@ -120,18 +68,12 @@ typedef enum {
* HA communication subsystem.
*
* CTL_HA_EVT_MSG_RECV: Message received by the other node.
- * CTL_HA_EVT_MSG_SENT: Message sent to the other node.
- * CTL_HA_EVT_DISCONNECT: Communication channel disconnected.
- * CTL_HA_EVT_DMA_SENT: DMA successfully sent to other node (push).
- * CTL_HA_EVT_DMA_RECEIVED: DMA successfully received by other node (pull).
+ * CTL_HA_EVT_LINK_CHANGE: Communication channel status changed.
*/
typedef enum {
CTL_HA_EVT_NONE,
CTL_HA_EVT_MSG_RECV,
- CTL_HA_EVT_MSG_SENT,
- CTL_HA_EVT_DISCONNECT,
- CTL_HA_EVT_DMA_SENT,
- CTL_HA_EVT_DMA_RECEIVED,
+ CTL_HA_EVT_LINK_CHANGE,
CTL_HA_EVT_MAX
} ctl_ha_event;
@@ -146,12 +88,6 @@ typedef enum {
} ctl_ha_status;
typedef enum {
- CTL_HA_DATA_CTL,
- CTL_HA_DATA_ZFS,
- CTL_HA_DATA_MAX
-} ctl_ha_dtid;
-
-typedef enum {
CTL_HA_DT_CMD_READ,
CTL_HA_DT_CMD_WRITE,
} ctl_ha_dt_cmd;
@@ -164,110 +100,40 @@ struct ctl_ha_dt_req {
ctl_ha_dt_cmd command;
void *context;
ctl_ha_dt_cb callback;
- ctl_ha_dtid id;
int ret;
uint32_t size;
uint8_t *local;
uint8_t *remote;
+ TAILQ_ENTRY(ctl_ha_dt_req) links;
};
+struct ctl_softc;
+ctl_ha_status ctl_ha_msg_init(struct ctl_softc *softc);
+ctl_ha_status ctl_ha_msg_shutdown(struct ctl_softc *softc);
+
typedef void (*ctl_evt_handler)(ctl_ha_channel channel, ctl_ha_event event,
int param);
void ctl_ha_register_evthandler(ctl_ha_channel channel,
ctl_evt_handler handler);
-static inline ctl_ha_status
-ctl_ha_msg_create(ctl_ha_channel channel, ctl_evt_handler handler)
-{
- return (CTL_HA_STATUS_SUCCESS);
-}
-
-/*
- * Receive a message of the specified size.
- */
-static inline ctl_ha_status
-ctl_ha_msg_recv(ctl_ha_channel channel, void *buffer, unsigned int size,
- int wait)
-{
- return (CTL_HA_STATUS_SUCCESS);
-}
-
-/*
- * Send a message of the specified size.
- */
-static inline ctl_ha_status
-ctl_ha_msg_send(ctl_ha_channel channel, void *buffer, unsigned int size,
- int wait)
-{
- return (CTL_HA_STATUS_SUCCESS);
-}
-
-/*
- * Allocate a data transfer request structure.
- */
-static inline struct ctl_ha_dt_req *
-ctl_dt_req_alloc(void)
-{
- return (NULL);
-}
-
-/*
- * Free a data transfer request structure.
- */
-static inline void
-ctl_dt_req_free(struct ctl_ha_dt_req *req)
-{
- return;
-}
-
-/*
- * Issue a DMA request for a single buffer.
- */
-static inline ctl_ha_status
-ctl_dt_single(struct ctl_ha_dt_req *req)
-{
- return (CTL_HA_STATUS_WAIT);
-}
+ctl_ha_status ctl_ha_msg_register(ctl_ha_channel channel,
+ ctl_evt_handler handler);
+ctl_ha_status ctl_ha_msg_recv(ctl_ha_channel channel, void *addr,
+ size_t len, int wait);
+ctl_ha_status ctl_ha_msg_send(ctl_ha_channel channel, const void *addr,
+ size_t len, int wait);
+ctl_ha_status ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr,
+ size_t len, const void *addr2, size_t len2, int wait);
+ctl_ha_status ctl_ha_msg_deregister(ctl_ha_channel channel);
-/*
- * SINGLE: One node
- * HA: Two nodes (Active/Active implied)
- * SLAVE/MASTER: The component can set these flags to indicate which side
- * is in control. It has no effect on the HA framework.
- */
-typedef enum {
- CTL_HA_STATE_UNKNOWN = 0x00,
- CTL_HA_STATE_SINGLE = 0x01,
- CTL_HA_STATE_HA = 0x02,
- CTL_HA_STATE_MASK = 0x0F,
- CTL_HA_STATE_SLAVE = 0x10,
- CTL_HA_STATE_MASTER = 0x20
-} ctl_ha_state;
+struct ctl_ha_dt_req * ctl_dt_req_alloc(void);
+void ctl_dt_req_free(struct ctl_ha_dt_req *req);
+ctl_ha_status ctl_dt_single(struct ctl_ha_dt_req *req);
typedef enum {
- CTL_HA_COMP_STATUS_OK,
- CTL_HA_COMP_STATUS_FAILED,
- CTL_HA_COMP_STATUS_ERROR
-} ctl_ha_comp_status;
-
-struct ctl_ha_component;
-
-typedef ctl_ha_comp_status (*ctl_hacmp_init_t)(struct ctl_ha_component *);
-typedef ctl_ha_comp_status (*ctl_hacmp_start_t)(struct ctl_ha_component *,
- ctl_ha_state);
-
-struct ctl_ha_component {
- char *name;
- ctl_ha_state state;
- ctl_ha_comp_status status;
- ctl_hacmp_init_t init;
- ctl_hacmp_start_t start;
- ctl_hacmp_init_t quiesce;
-};
-
-#define CTL_HA_STATE_IS_SINGLE(state) ((state & CTL_HA_STATE_MASK) == \
- CTL_HA_STATE_SINGLE)
-#define CTL_HA_STATE_IS_HA(state) ((state & CTL_HA_STATE_MASK) == \
- CTL_HA_STATE_HA)
+ CTL_HA_LINK_OFFLINE = 0x00,
+ CTL_HA_LINK_UNKNOWN = 0x01,
+ CTL_HA_LINK_ONLINE = 0x02
+} ctl_ha_link_state;
#endif /* _CTL_HA_H_ */
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index 0f3c0fe..e85d0a5 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -92,13 +92,11 @@ typedef enum {
CTL_FLAG_EDPTR_SGLIST = 0x00000010, /* ext_data_ptr is S/G list */
CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */
CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */
- CTL_FLAG_CONTROL_DEV = 0x00000080, /* processor device */
CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */
CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */
CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */
CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */
CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */
- CTL_FLAG_NO_DATASYNC = 0x00002000, /* don't cache flush data */
CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */
CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/
CTL_FLAG_SENT_2OTHER_SC = 0x00010000,
@@ -108,9 +106,6 @@ typedef enum {
addresses, not virtual ones*/
CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of
completing */
- CTL_FLAG_AUTO_MIRROR = 0x00200000, /* Automatically use memory
- from the RC cache mirrored
- address area. */
#if 0
CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */
#endif
@@ -118,14 +113,8 @@ typedef enum {
CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/
CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/
- CTL_FLAG_REDIR_DONE = 0x02000000, /* Redirection has already
- been done. */
CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */
CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */
- CTL_FLAG_RDMA_MASK = CTL_FLAG_NO_DATASYNC | CTL_FLAG_BUS_ADDR |
- CTL_FLAG_AUTO_MIRROR | CTL_FLAG_REDIR_DONE,
- /* Flags we care about for
- remote DMA */
CTL_FLAG_STATUS_SENT = 0x10000000 /* Status sent by datamove */
} ctl_io_flags;
@@ -203,15 +192,16 @@ typedef enum {
CTL_MSG_BAD_JUJU,
CTL_MSG_MANAGE_TASKS,
CTL_MSG_PERS_ACTION,
- CTL_MSG_SYNC_FE,
CTL_MSG_DATAMOVE,
- CTL_MSG_DATAMOVE_DONE
+ CTL_MSG_DATAMOVE_DONE,
+ CTL_MSG_UA, /* Set/clear UA on secondary. */
+ CTL_MSG_PORT_SYNC, /* Information about port. */
+ CTL_MSG_LUN_SYNC, /* Information about LUN. */
+ CTL_MSG_FAILOVER /* Fake, never sent through the wire */
} ctl_msg_type;
struct ctl_scsiio;
-#define CTL_NUM_SG_ENTRIES 9
-
struct ctl_io_hdr {
uint32_t version; /* interface version XXX */
ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */
@@ -237,10 +227,8 @@ struct ctl_io_hdr {
union ctl_io *serializing_sc;
void *pool; /* I/O pool */
union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */
- struct ctl_sg_entry remote_sglist[CTL_NUM_SG_ENTRIES];
- struct ctl_sg_entry remote_dma_sglist[CTL_NUM_SG_ENTRIES];
- struct ctl_sg_entry local_sglist[CTL_NUM_SG_ENTRIES];
- struct ctl_sg_entry local_dma_sglist[CTL_NUM_SG_ENTRIES];
+ struct ctl_sg_entry *remote_sglist;
+ struct ctl_sg_entry *local_sglist;
STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */
TAILQ_ENTRY(ctl_io_hdr) ooa_links;
TAILQ_ENTRY(ctl_io_hdr) blocked_links;
@@ -386,10 +374,10 @@ struct ctl_ha_msg_hdr {
union ctl_io *serializing_sc;
struct ctl_nexus nexus; /* Initiator, port, target, lun */
uint32_t status; /* transaction status */
- TAILQ_ENTRY(ctl_ha_msg_hdr) links;
};
#define CTL_HA_MAX_SG_ENTRIES 16
+#define CTL_HA_DATAMOVE_SEGMENT 131072
/*
* Used for CTL_MSG_PERS_ACTION.
@@ -400,6 +388,16 @@ struct ctl_ha_msg_pr {
};
/*
+ * Used for CTL_MSG_UA.
+ */
+struct ctl_ha_msg_ua {
+ struct ctl_ha_msg_hdr hdr;
+ int ua_all;
+ int ua_set;
+ int ua_type;
+};
+
+/*
* The S/G handling here is a little different than the standard ctl_scsiio
* structure, because we can't pass data by reference in between controllers.
* The S/G list in the ctl_scsiio struct is normally passed in the
@@ -431,17 +429,18 @@ struct ctl_ha_msg_dt {
*/
struct ctl_ha_msg_scsi {
struct ctl_ha_msg_hdr hdr;
- uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, etc. */
+ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
+ uint8_t cdb_len; /* CDB length */
uint8_t scsi_status; /* SCSI status byte */
- struct scsi_sense_data sense_data; /* sense data */
uint8_t sense_len; /* Returned sense length */
uint8_t sense_residual; /* sense residual length */
uint32_t residual; /* data residual length */
uint32_t fetd_status; /* trans status, set by FETD,
0 = good*/
struct ctl_lba_len lbalen; /* used for stats */
+ struct scsi_sense_data sense_data; /* sense data */
};
/*
@@ -454,12 +453,50 @@ struct ctl_ha_msg_task {
ctl_tag_type tag_type; /* simple, ordered, etc. */
};
+/*
+ * Used for CTL_MSG_PORT_SYNC.
+ */
+struct ctl_ha_msg_port {
+ struct ctl_ha_msg_hdr hdr;
+ int port_type;
+ int physical_port;
+ int virtual_port;
+ int status;
+ int name_len;
+ int lun_map_len;
+ int port_devid_len;
+ int target_devid_len;
+ uint8_t data[];
+};
+
+/*
+ * Used for CTL_MSG_LUN_SYNC.
+ */
+struct ctl_ha_msg_lun {
+ struct ctl_ha_msg_hdr hdr;
+ int flags;
+ unsigned int pr_generation;
+ uint32_t pr_res_idx;
+ uint8_t pr_res_type;
+ int lun_devid_len;
+ int pr_key_count;
+ uint8_t data[];
+};
+
+struct ctl_ha_msg_lun_pr_key {
+ uint32_t pr_iid;
+ uint64_t pr_key;
+};
+
union ctl_ha_msg {
struct ctl_ha_msg_hdr hdr;
struct ctl_ha_msg_task task;
struct ctl_ha_msg_scsi scsi;
struct ctl_ha_msg_dt dt;
struct ctl_ha_msg_pr pr;
+ struct ctl_ha_msg_ua ua;
+ struct ctl_ha_msg_port port;
+ struct ctl_ha_msg_lun lun;
};
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index a0bbc36..c42c2c9 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -106,8 +106,8 @@ typedef enum {
CTL_CMD_FLAG_OK_ON_BOTH = 0x0300,
CTL_CMD_FLAG_OK_ON_STOPPED = 0x0400,
CTL_CMD_FLAG_OK_ON_INOPERABLE = 0x0800,
- CTL_CMD_FLAG_OK_ON_OFFLINE = 0x1000,
- CTL_CMD_FLAG_OK_ON_SECONDARY = 0x2000,
+ CTL_CMD_FLAG_OK_ON_STANDBY = 0x1000,
+ CTL_CMD_FLAG_OK_ON_UNAVAIL = 0x2000,
CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000,
CTL_CMD_FLAG_SA5 = 0x8000
} ctl_cmd_flags;
@@ -157,7 +157,8 @@ typedef enum {
CTL_LUN_PR_RESERVED = 0x100,
CTL_LUN_PRIMARY_SC = 0x200,
CTL_LUN_SENSE_DESC = 0x400,
- CTL_LUN_READONLY = 0x800
+ CTL_LUN_READONLY = 0x800,
+ CTL_LUN_PEER_SC_PRIMARY = 0x1000
} ctl_lun_flags;
typedef enum {
@@ -398,7 +399,7 @@ struct ctl_lun {
struct ctl_lun_io_stats stats;
uint32_t res_idx;
unsigned int PRGeneration;
- uint64_t *pr_keys[2 * CTL_MAX_PORTS];
+ uint64_t *pr_keys[CTL_MAX_PORTS];
int pr_key_count;
uint32_t pr_res_idx;
uint8_t res_type;
@@ -434,11 +435,13 @@ struct ctl_softc {
ctl_gen_flags flags;
ctl_ha_mode ha_mode;
int ha_id;
- int ha_state;
int is_single;
- int port_offset;
- int persis_offset;
- int inquiry_pq_no_lun;
+ ctl_ha_link_state ha_link;
+ int port_min;
+ int port_max;
+ int port_cnt;
+ int init_min;
+ int init_max;
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
void *othersc_pool;
@@ -469,8 +472,6 @@ struct ctl_softc {
extern const struct ctl_cmd_entry ctl_cmd_table[256];
uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
-uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
-uint32_t ctl_port_idx(int port_num);
int ctl_lun_map_init(struct ctl_port *port);
int ctl_lun_map_deinit(struct ctl_port *port);
int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun);
@@ -508,7 +509,6 @@ int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio);
int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio);
int ctl_report_supported_tmf(struct ctl_scsiio *ctsio);
int ctl_report_timestamp(struct ctl_scsiio *ctsio);
-int ctl_isc(struct ctl_scsiio *ctsio);
int ctl_get_lba_status(struct ctl_scsiio *ctsio);
void ctl_tpc_init(struct ctl_softc *softc);
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index b1b674f..e4722fe 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -534,7 +534,7 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
list_id = cdb->list_identifier;
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
- ctl_get_resindex(&ctsio->io_hdr.nexus));
+ ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@@ -616,7 +616,7 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
list_id = cdb->list_identifier;
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
- ctl_get_resindex(&ctsio->io_hdr.nexus));
+ ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL || !list->completed) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@@ -688,7 +688,7 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
- ctl_get_resindex(&ctsio->io_hdr.nexus));
+ ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@@ -771,7 +771,7 @@ ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
- ctl_get_resindex(&ctsio->io_hdr.nexus));
+ ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@@ -1645,7 +1645,7 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
list->init_port = -1;
else
list->init_port = ctsio->io_hdr.nexus.targ_port;
- list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = data->list_identifier;
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@@ -1772,7 +1772,7 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
list->init_port = -1;
else
list->init_port = ctsio->io_hdr.nexus.targ_port;
- list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(data->list_identifier);
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@@ -1890,7 +1890,7 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
softc = lun->ctl_softc;
- port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+ port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
cdb = (struct scsi_populate_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -1944,7 +1944,7 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
- list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(cdb->list_identifier);
list->flags = data->flags;
list->ctsio = ctsio;
@@ -2070,7 +2070,7 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
- list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(cdb->list_identifier);
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@@ -2162,7 +2162,7 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
- ctl_get_resindex(&ctsio->io_hdr.nexus));
+ ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c
index 40e6df8..7664cb9 100644
--- a/sys/cam/ctl/ctl_tpc_local.c
+++ b/sys/cam/ctl/ctl_tpc_local.c
@@ -97,11 +97,11 @@ tpcl_init(void)
port->fe_done = tpcl_done;
port->max_targets = 1;
port->max_target_id = 0;
+ port->targ_port = -1;
port->max_initiators = 1;
- if (ctl_port_register(port) != 0)
- {
- printf("%s: tpc frontend registration failed\n", __func__);
+ if (ctl_port_register(port) != 0) {
+ printf("%s: ctl_port_register() failed with error\n", __func__);
return (0);
}
@@ -287,7 +287,7 @@ tpcl_resolve(struct ctl_softc *softc, int init_port,
cscdid = (struct scsi_ec_cscd_id *)cscd;
mtx_lock(&softc->ctl_lock);
if (init_port >= 0)
- port = softc->ctl_ports[ctl_port_idx(init_port)];
+ port = softc->ctl_ports[init_port];
else
port = NULL;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index aab1a30..655afd8 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -400,6 +400,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
*/
port->max_targets = cpi->max_target;
port->max_target_id = cpi->max_target;
+ port->targ_port = -1;
/*
* XXX KDM need to figure out whether we're the master or
diff --git a/sys/conf/files b/sys/conf/files
index 34bcb03..7f829bc 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -85,6 +85,7 @@ cam/ctl/ctl_frontend.c optional ctl
cam/ctl/ctl_frontend_cam_sim.c optional ctl
cam/ctl/ctl_frontend_ioctl.c optional ctl
cam/ctl/ctl_frontend_iscsi.c optional ctl
+cam/ctl/ctl_ha.c optional ctl
cam/ctl/ctl_scsi_all.c optional ctl
cam/ctl/ctl_tpc.c optional ctl
cam/ctl/ctl_tpc_local.c optional ctl
diff --git a/sys/modules/ctl/Makefile b/sys/modules/ctl/Makefile
index 452a641..dc64a4b 100644
--- a/sys/modules/ctl/Makefile
+++ b/sys/modules/ctl/Makefile
@@ -13,6 +13,7 @@ SRCS+= ctl_frontend.c
SRCS+= ctl_frontend_cam_sim.c
SRCS+= ctl_frontend_ioctl.c
SRCS+= ctl_frontend_iscsi.c
+SRCS+= ctl_ha.c
SRCS+= ctl_scsi_all.c
SRCS+= ctl_tpc.c
SRCS+= ctl_tpc_local.c
OpenPOWER on IntegriCloud