Diffstat (limited to 'sys/cam/ctl/ctl.c')
-rw-r--r--  sys/cam/ctl/ctl.c  | 371
1 file changed, 65 insertions(+), 306 deletions(-)
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 6c0530f..ee30d51 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
+#include <vm/uma.h>
#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
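
In short, this change retires CTL's hand-rolled ctl_io free-list pools (internal, emergency, and per-other-SC) in favor of the kernel's UMA slab allocator: ctl_init() now creates a single master "CTL IO" zone, ctl_pool_create() shrinks to a thin wrapper over it, and the pool_lock mutex disappears because UMA synchronizes internally. For readers following along, here is a minimal sketch of the UMA lifecycle the new code leans on; the zone name and item type are placeholders, not part of this diff:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct example_item {
        int     value;
};

static uma_zone_t example_zone;

static void
example_setup(void)
{
        /* One zone per object type; freed items are cached per-CPU. */
        example_zone = uma_zcreate("example", sizeof(struct example_item),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static void
example_cycle(void)
{
        struct example_item *item;

        /* M_NOWAIT can fail with NULL; M_WAITOK sleeps and cannot. */
        item = uma_zalloc(example_zone, M_NOWAIT);
        if (item == NULL)
                return;
        item->value = 1;
        uma_zfree(example_zone, item);
}

static void
example_teardown(void)
{
        /* Legal only after every item has been freed back to the zone. */
        uma_zdestroy(example_zone);
}
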
@@ -644,7 +645,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
#if 0
printf("Serialize\n");
#endif
- io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
+ io = ctl_alloc_io_nowait(ctl_softc->othersc_pool);
if (io == NULL) {
printf("ctl_isc_event_handler: can't allocate "
"ctl_io!\n");
@@ -889,8 +890,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* Handle resets sent from the other side */
case CTL_MSG_MANAGE_TASKS: {
struct ctl_taskio *taskio;
- taskio = (struct ctl_taskio *)ctl_alloc_io(
- (void *)ctl_softc->othersc_pool);
+ taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
+ ctl_softc->othersc_pool);
if (taskio == NULL) {
printf("ctl_isc_event_handler: can't allocate "
"ctl_io!\n");
@@ -918,8 +919,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
}
/* Persistent Reserve action which needs attention */
case CTL_MSG_PERS_ACTION:
- presio = (struct ctl_prio *)ctl_alloc_io(
- (void *)ctl_softc->othersc_pool);
+ presio = (struct ctl_prio *)ctl_alloc_io_nowait(
+ ctl_softc->othersc_pool);
if (presio == NULL) {
printf("ctl_isc_event_handler: can't allocate "
"ctl_io!\n");
@@ -1003,7 +1004,7 @@ static int
ctl_init(void)
{
struct ctl_softc *softc;
- struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
+ void *other_pool;
struct ctl_port *port;
int i, error, retval;
//int isc_retval;
@@ -1049,7 +1050,8 @@ ctl_init(void)
"Report no lun possible for invalid LUNs");
mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
- mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
+ softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
softc->open_count = 0;
/*
@@ -1086,36 +1088,15 @@ ctl_init(void)
STAILQ_INIT(&softc->fe_list);
STAILQ_INIT(&softc->port_list);
STAILQ_INIT(&softc->be_list);
- STAILQ_INIT(&softc->io_pools);
ctl_tpc_init(softc);
- if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
- &internal_pool)!= 0){
- printf("ctl: can't allocate %d entry internal pool, "
- "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
- return (ENOMEM);
- }
-
- if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
- CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
- printf("ctl: can't allocate %d entry emergency pool, "
- "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
- ctl_pool_free(internal_pool);
- return (ENOMEM);
- }
-
- if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
+ if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
&other_pool) != 0)
{
printf("ctl: can't allocate %d entry other SC pool, "
"exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
- ctl_pool_free(internal_pool);
- ctl_pool_free(emergency_pool);
return (ENOMEM);
}
-
- softc->internal_pool = internal_pool;
- softc->emergency_pool = emergency_pool;
softc->othersc_pool = other_pool;
if (worker_threads <= 0)
@@ -1137,8 +1118,6 @@ ctl_init(void)
&softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
if (error != 0) {
printf("error creating CTL work thread!\n");
- ctl_pool_free(internal_pool);
- ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
@@ -1147,8 +1126,6 @@ ctl_init(void)
&softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
if (error != 0) {
printf("error creating CTL lun thread!\n");
- ctl_pool_free(internal_pool);
- ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
@@ -1156,8 +1133,6 @@ ctl_init(void)
&softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
if (error != 0) {
printf("error creating CTL threshold thread!\n");
- ctl_pool_free(internal_pool);
- ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
@@ -1210,7 +1185,6 @@ ctl_shutdown(void)
{
struct ctl_softc *softc;
struct ctl_lun *lun, *next_lun;
- struct ctl_io_pool *pool;
softc = (struct ctl_softc *)control_softc;
@@ -1231,24 +1205,13 @@ ctl_shutdown(void)
ctl_frontend_deregister(&ioctl_frontend);
- /*
- * This will rip the rug out from under any FETDs or anyone else
- * that has a pool allocated. Since we increment our module
- * refcount any time someone outside the main CTL module allocates
- * a pool, we shouldn't have any problems here. The user won't be
- * able to unload the CTL module until client modules have
- * successfully unloaded.
- */
- while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
- ctl_pool_free(pool);
-
#if 0
ctl_shutdown_thread(softc->work_thread);
mtx_destroy(&softc->queue_lock);
#endif
ctl_tpc_shutdown(softc);
- mtx_destroy(&softc->pool_lock);
+ uma_zdestroy(softc->io_zone);
mtx_destroy(&softc->ctl_lock);
destroy_dev(softc->dev);
@@ -2371,21 +2334,15 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
- if (io == NULL) {
- printf("ctl_ioctl: can't allocate ctl_io!\n");
- retval = ENOSPC;
- break;
- }
/*
* Need to save the pool reference so it doesn't get
* spammed by the user's ctl_io.
*/
pool_tmp = io->io_hdr.pool;
-
memcpy(io, (void *)addr, sizeof(*io));
-
io->io_hdr.pool = pool_tmp;
+
/*
* No status yet, so make sure the status is set properly.
*/
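
The deleted NULL check above is safe to drop because ctl_alloc_io() now allocates with M_WAITOK (see the rewrite further down), and UMA's contract is that an M_WAITOK allocation sleeps until an item is available rather than returning NULL. A minimal illustration of the two flag contracts, with a placeholder zone:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

static uma_zone_t some_zone;    /* assume uma_zcreate() ran earlier */

static void *
example_alloc(int can_sleep)
{
        void *item;

        if (can_sleep) {
                /* Never NULL; may sleep, so illegal in interrupt context. */
                item = uma_zalloc(some_zone, M_WAITOK);
        } else {
                /* May return NULL; the caller needs a failure path. */
                item = uma_zalloc(some_zone, M_NOWAIT);
        }
        return (item);
}
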
@@ -3729,285 +3686,95 @@ ctl_kfree_io(union ctl_io *io)
#endif /* unused */
/*
- * ctl_softc, pool_type, total_ctl_io are passed in.
+ * ctl_softc, pool_name, total_ctl_io are passed in.
* npool is passed out.
*/
int
-ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
- uint32_t total_ctl_io, struct ctl_io_pool **npool)
+ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
+ uint32_t total_ctl_io, void **npool)
{
- uint32_t i;
- union ctl_io *cur_io, *next_io;
+#ifdef IO_POOLS
struct ctl_io_pool *pool;
- int retval;
-
- retval = 0;
pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
M_NOWAIT | M_ZERO);
- if (pool == NULL) {
- retval = ENOMEM;
- goto bailout;
- }
+ if (pool == NULL)
+ return (ENOMEM);
- pool->type = pool_type;
+ snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
pool->ctl_softc = ctl_softc;
-
- mtx_lock(&ctl_softc->pool_lock);
- pool->id = ctl_softc->cur_pool_id++;
- mtx_unlock(&ctl_softc->pool_lock);
-
- pool->flags = CTL_POOL_FLAG_NONE;
- pool->refcount = 1; /* Reference for validity. */
- STAILQ_INIT(&pool->free_queue);
-
- /*
- * XXX KDM other options here:
- * - allocate a page at a time
- * - allocate one big chunk of memory.
- * Page allocation might work well, but would take a little more
- * tracking.
- */
- for (i = 0; i < total_ctl_io; i++) {
- cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
- M_NOWAIT);
- if (cur_io == NULL) {
- retval = ENOMEM;
- break;
- }
- cur_io->io_hdr.pool = pool;
- STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
- pool->total_ctl_io++;
- pool->free_ctl_io++;
- }
-
- if (retval != 0) {
- for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
- cur_io != NULL; cur_io = next_io) {
- next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
- links);
- STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
- ctl_io_hdr, links);
- free(cur_io, M_CTLIO);
- }
-
- free(pool, M_CTL);
- goto bailout;
- }
- mtx_lock(&ctl_softc->pool_lock);
- ctl_softc->num_pools++;
- STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
- /*
- * Increment our usage count if this is an external consumer, so we
- * can't get unloaded until the external consumer (most likely a
- * FETD) unloads and frees his pool.
- *
- * XXX KDM will this increment the caller's module use count, or
- * mine?
- */
-#if 0
- if ((pool_type != CTL_POOL_EMERGENCY)
- && (pool_type != CTL_POOL_INTERNAL)
- && (pool_type != CTL_POOL_4OTHERSC))
- MOD_INC_USE_COUNT;
-#endif
-
- mtx_unlock(&ctl_softc->pool_lock);
+ pool->zone = uma_zsecond_create(pool->name, NULL,
+ NULL, NULL, NULL, ctl_softc->io_zone);
+ /* uma_prealloc(pool->zone, total_ctl_io); */
*npool = pool;
-
-bailout:
-
- return (retval);
-}
-
-static int
-ctl_pool_acquire(struct ctl_io_pool *pool)
-{
-
- mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
-
- if (pool->flags & CTL_POOL_FLAG_INVALID)
- return (EINVAL);
-
- pool->refcount++;
-
- return (0);
-}
-
-static void
-ctl_pool_release(struct ctl_io_pool *pool)
-{
- struct ctl_softc *ctl_softc = pool->ctl_softc;
- union ctl_io *io;
-
- mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
-
- if (--pool->refcount != 0)
- return;
-
- while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
- STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
- links);
- free(io, M_CTLIO);
- }
-
- STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
- ctl_softc->num_pools--;
-
- /*
- * XXX KDM will this decrement the caller's usage count or mine?
- */
-#if 0
- if ((pool->type != CTL_POOL_EMERGENCY)
- && (pool->type != CTL_POOL_INTERNAL)
- && (pool->type != CTL_POOL_4OTHERSC))
- MOD_DEC_USE_COUNT;
+#else
+ *npool = ctl_softc->io_zone;
#endif
-
- free(pool, M_CTL);
+ return (0);
}
void
ctl_pool_free(struct ctl_io_pool *pool)
{
- struct ctl_softc *ctl_softc;
if (pool == NULL)
return;
- ctl_softc = pool->ctl_softc;
- mtx_lock(&ctl_softc->pool_lock);
- pool->flags |= CTL_POOL_FLAG_INVALID;
- ctl_pool_release(pool);
- mtx_unlock(&ctl_softc->pool_lock);
+#ifdef IO_POOLS
+ uma_zdestroy(pool->zone);
+ free(pool, M_CTL);
+#endif
}
-/*
- * This routine does not block (except for spinlocks of course).
- * It tries to allocate a ctl_io union from the caller's pool as quickly as
- * possible.
- */
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
union ctl_io *io;
- struct ctl_softc *ctl_softc;
- struct ctl_io_pool *pool, *npool;
- struct ctl_io_pool *emergency_pool;
-
- pool = (struct ctl_io_pool *)pool_ref;
-
- if (pool == NULL) {
- printf("%s: pool is NULL\n", __func__);
- return (NULL);
- }
-
- emergency_pool = NULL;
-
- ctl_softc = pool->ctl_softc;
-
- mtx_lock(&ctl_softc->pool_lock);
- /*
- * First, try to get the io structure from the user's pool.
- */
- if (ctl_pool_acquire(pool) == 0) {
- io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&pool->free_queue, links);
- pool->total_allocated++;
- pool->free_ctl_io--;
- mtx_unlock(&ctl_softc->pool_lock);
- return (io);
- } else
- ctl_pool_release(pool);
- }
- /*
- * If he doesn't have any io structures left, search for an
- * emergency pool and grab one from there.
- */
- STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
- if (npool->type != CTL_POOL_EMERGENCY)
- continue;
-
- if (ctl_pool_acquire(npool) != 0)
- continue;
-
- emergency_pool = npool;
+#ifdef IO_POOLS
+ struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
- io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&npool->free_queue, links);
- npool->total_allocated++;
- npool->free_ctl_io--;
- mtx_unlock(&ctl_softc->pool_lock);
- return (io);
- } else
- ctl_pool_release(npool);
- }
-
- /* Drop the spinlock before we malloc */
- mtx_unlock(&ctl_softc->pool_lock);
+ io = uma_zalloc(pool->zone, M_WAITOK);
+#else
+ io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
+#endif
+ if (io != NULL)
+ io->io_hdr.pool = pool_ref;
+ return (io);
+}
- /*
- * The emergency pool (if it exists) didn't have one, so try an
- * atomic (i.e. nonblocking) malloc and see if we get lucky.
- */
- io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
- if (io != NULL) {
- /*
- * If the emergency pool exists but is empty, add this
- * ctl_io to its list when it gets freed.
- */
- if (emergency_pool != NULL) {
- mtx_lock(&ctl_softc->pool_lock);
- if (ctl_pool_acquire(emergency_pool) == 0) {
- io->io_hdr.pool = emergency_pool;
- emergency_pool->total_ctl_io++;
- /*
- * Need to bump this, otherwise
- * total_allocated and total_freed won't
- * match when we no longer have anything
- * outstanding.
- */
- emergency_pool->total_allocated++;
- }
- mtx_unlock(&ctl_softc->pool_lock);
- } else
- io->io_hdr.pool = NULL;
- }
+union ctl_io *
+ctl_alloc_io_nowait(void *pool_ref)
+{
+ union ctl_io *io;
+#ifdef IO_POOLS
+ struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+ io = uma_zalloc(pool->zone, M_NOWAIT);
+#else
+ io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
+#endif
+ if (io != NULL)
+ io->io_hdr.pool = pool_ref;
return (io);
}
void
ctl_free_io(union ctl_io *io)
{
+#ifdef IO_POOLS
+ struct ctl_io_pool *pool;
+#endif
+
if (io == NULL)
return;
- /*
- * If this ctl_io has a pool, return it to that pool.
- */
- if (io->io_hdr.pool != NULL) {
- struct ctl_io_pool *pool;
-
- pool = (struct ctl_io_pool *)io->io_hdr.pool;
- mtx_lock(&pool->ctl_softc->pool_lock);
- io->io_hdr.io_type = 0xff;
- STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
- pool->total_freed++;
- pool->free_ctl_io++;
- ctl_pool_release(pool);
- mtx_unlock(&pool->ctl_softc->pool_lock);
- } else {
- /*
- * Otherwise, just free it. We probably malloced it and
- * the emergency pool wasn't available.
- */
- free(io, M_CTLIO);
- }
-
+#ifdef IO_POOLS
+ pool = (struct ctl_io_pool *)io->io_hdr.pool;
+ uma_zfree(pool->zone, io);
+#else
+ uma_zfree((uma_zone_t)io->io_hdr.pool, io);
+#endif
}
void
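
With the free-queue machinery gone, a pool reference is now either a named secondary zone layered over the master io_zone (when IO_POOLS is defined) or the master zone itself. A secondary zone shares the master's slab layout and backing pages while keeping its own name and statistics; item size and alignment are inherited from the master, which is why uma_zsecond_create() takes no size argument. A minimal sketch of the pattern, with placeholder names:

#include <sys/param.h>
#include <vm/uma.h>

struct example_item {
        int     value;
};

static uma_zone_t master_zone;
static uma_zone_t per_port_zone;

static void
example_zones_setup(void)
{
        /* The master zone owns the slab layout for all items. */
        master_zone = uma_zcreate("example master",
            sizeof(struct example_item),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        /*
         * The secondary zone is a separately named cache on top of
         * the master's backing store, giving per-consumer accounting
         * without a second slab layout.
         */
        per_port_zone = uma_zsecond_create("example port",
            NULL, NULL, NULL, NULL, master_zone);
}

static void
example_zones_teardown(void)
{
        /* Destroy secondaries before the master they depend on. */
        uma_zdestroy(per_port_zone);
        uma_zdestroy(master_zone);
}

The commented-out uma_prealloc() call left in ctl_pool_create() suggests the old total_ctl_io sizing is being kept around in case preallocating the zone proves necessary later.
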
@@ -4022,9 +3789,7 @@ ctl_zero_io(union ctl_io *io)
* May need to preserve linked list pointers at some point too.
*/
pool_ref = io->io_hdr.pool;
-
memset(io, 0, sizeof(*io));
-
io->io_hdr.pool = pool_ref;
}
@@ -5657,16 +5422,10 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
union ctl_io *new_io;
new_io = ctl_alloc_io(ctsio->io_hdr.pool);
- if (new_io == NULL) {
- ctl_set_busy(ctsio);
- ctl_done((union ctl_io *)ctsio);
- } else {
- ctl_copy_io((union ctl_io *)ctsio,
- new_io);
- retval = lun->backend->config_write(new_io);
- ctl_set_success(ctsio);
- ctl_done((union ctl_io *)ctsio);
- }
+ ctl_copy_io((union ctl_io *)ctsio, new_io);
+ retval = lun->backend->config_write(new_io);
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
} else {
retval = lun->backend->config_write(
(union ctl_io *)ctsio);