author     mav <mav@FreeBSD.org>    2014-01-09 10:59:31 +0000
committer  mav <mav@FreeBSD.org>    2014-01-09 10:59:31 +0000
commit     f9a52747b5cf2552bf10c81b284ade1714a3d3d0 (patch)
tree       2a89c6d4fc2d2fdcd2e1493f0efbd56f247ede84 /sys/cam
parent     288680289630ecbdcea7b5bc516327ed59b52411 (diff)
MFC r257946:
Introduce a separate mutex to protect the CTL I/O pools, slightly
reducing global CTL lock scope and contention.
While there, simplify the CTL I/O pool KPI, hiding implementation details.
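
The pattern behind the change, as seen in the ctl.c hunks further down, is a
reference-counted I/O pool guarded by the new, narrower pool_lock instead of
the global ctl_lock: ctl_pool_create() starts the pool with one "validity"
reference, allocation and free paths take and drop references under
pool_lock, and the new one-call ctl_pool_free() merely marks the pool invalid
and drops that initial reference, so the pool is only torn down once the last
outstanding reference goes away. The sketch below is a minimal userland
approximation of that refcount-plus-invalid-flag scheme using pthreads;
io_pool, pool_create, pool_acquire, pool_release and pool_free are
illustrative names, not the actual CTL functions.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Single lock covering all pool bookkeeping, like softc->pool_lock. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct io_pool {
	int	refcount;	/* starts at 1: the "validity" reference */
	bool	invalid;	/* stands in for CTL_POOL_FLAG_INVALID */
	/* the free queue of preallocated I/Os would live here */
};

static struct io_pool *
pool_create(void)
{
	struct io_pool *pool;

	pool = calloc(1, sizeof(*pool));
	if (pool != NULL)
		pool->refcount = 1;	/* reference for validity */
	return (pool);
}

/* Take a reference, e.g. while an I/O from this pool is outstanding. */
static int
pool_acquire(struct io_pool *pool)	/* pool_lock must be held */
{

	if (pool->invalid)
		return (-1);
	pool->refcount++;
	return (0);
}

/* Drop a reference; the last one actually destroys the pool. */
static void
pool_release(struct io_pool *pool)	/* pool_lock must be held */
{

	if (--pool->refcount != 0)
		return;
	/* a real pool would drain its free queue here */
	free(pool);
}

/* One-call teardown: invalidate the pool and drop its validity reference. */
static void
pool_free(struct io_pool *pool)
{

	if (pool == NULL)
		return;
	pthread_mutex_lock(&pool_lock);
	pool->invalid = true;
	pool_release(pool);
	pthread_mutex_unlock(&pool_lock);
}

int
main(void)
{
	struct io_pool *pool;

	if ((pool = pool_create()) == NULL)
		return (1);

	pthread_mutex_lock(&pool_lock);
	(void)pool_acquire(pool);	/* an I/O is allocated from the pool */
	pthread_mutex_unlock(&pool_lock);

	pool_free(pool);		/* invalidated, but still referenced */

	pthread_mutex_lock(&pool_lock);
	pool_release(pool);		/* last I/O freed: pool goes away now */
	pthread_mutex_unlock(&pool_lock);
	return (0);
}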
Diffstat (limited to 'sys/cam')
-rw-r--r--  sys/cam/ctl/ctl.c           | 155
-rw-r--r--  sys/cam/ctl/ctl_frontend.c  |   8
-rw-r--r--  sys/cam/ctl/ctl_private.h   |   6
3 files changed, 66 insertions, 103 deletions
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c index 853a36d..d88150f 100644 --- a/sys/cam/ctl/ctl.c +++ b/sys/cam/ctl/ctl.c @@ -360,7 +360,6 @@ static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, int can_wait); static void ctl_kfree_io(union ctl_io *io); #endif /* unused */ -static void ctl_free_io_internal(union ctl_io *io, int have_lock); static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, struct ctl_be_lun *be_lun, struct ctl_id target_id); static int ctl_free_lun(struct ctl_lun *lun); @@ -998,6 +997,7 @@ ctl_init(void) "Report no lun possible for invalid LUNs"); mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); + mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF); softc->open_count = 0; /* @@ -1057,7 +1057,7 @@ ctl_init(void) CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) { printf("ctl: can't allocate %d entry emergency pool, " "exiting\n", CTL_POOL_ENTRIES_EMERGENCY); - ctl_pool_free(softc, internal_pool); + ctl_pool_free(internal_pool); return (ENOMEM); } @@ -1066,8 +1066,8 @@ ctl_init(void) { printf("ctl: can't allocate %d entry other SC pool, " "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); - ctl_pool_free(softc, internal_pool); - ctl_pool_free(softc, emergency_pool); + ctl_pool_free(internal_pool); + ctl_pool_free(emergency_pool); return (ENOMEM); } @@ -1075,12 +1075,6 @@ ctl_init(void) softc->emergency_pool = emergency_pool; softc->othersc_pool = other_pool; - mtx_lock(&softc->ctl_lock); - ctl_pool_acquire(internal_pool); - ctl_pool_acquire(emergency_pool); - ctl_pool_acquire(other_pool); - mtx_unlock(&softc->ctl_lock); - /* * We used to allocate a processor LUN here. The new scheme is to * just let the user allocate LUNs as he sees fit. @@ -1097,10 +1091,10 @@ ctl_init(void) printf("error creating CTL work thread!\n"); mtx_lock(&softc->ctl_lock); ctl_free_lun(lun); - ctl_pool_free(softc, internal_pool); - ctl_pool_free(softc, emergency_pool); - ctl_pool_free(softc, other_pool); mtx_unlock(&softc->ctl_lock); + ctl_pool_free(internal_pool); + ctl_pool_free(emergency_pool); + ctl_pool_free(other_pool); return (error); } if (bootverbose) @@ -1154,7 +1148,7 @@ ctl_shutdown(void) { struct ctl_softc *softc; struct ctl_lun *lun, *next_lun; - struct ctl_io_pool *pool, *next_pool; + struct ctl_io_pool *pool; softc = (struct ctl_softc *)control_softc; @@ -1171,6 +1165,8 @@ ctl_shutdown(void) ctl_free_lun(lun); } + mtx_unlock(&softc->ctl_lock); + /* * This will rip the rug out from under any FETDs or anyone else * that has a pool allocated. Since we increment our module @@ -1179,18 +1175,14 @@ ctl_shutdown(void) * able to unload the CTL module until client modules have * successfully unloaded. */ - for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL; - pool = next_pool) { - next_pool = STAILQ_NEXT(pool, links); - ctl_pool_free(softc, pool); - } - - mtx_unlock(&softc->ctl_lock); + while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL) + ctl_pool_free(pool); #if 0 ctl_shutdown_thread(softc->work_thread); #endif + mtx_destroy(&softc->pool_lock); mtx_destroy(&softc->ctl_lock); destroy_dev(softc->dev); @@ -3367,11 +3359,12 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, pool->type = pool_type; pool->ctl_softc = ctl_softc; - mtx_lock(&ctl_softc->ctl_lock); + mtx_lock(&ctl_softc->pool_lock); pool->id = ctl_softc->cur_pool_id++; - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); pool->flags = CTL_POOL_FLAG_NONE; + pool->refcount = 1; /* Reference for validity. 
*/ STAILQ_INIT(&pool->free_queue); /* @@ -3407,7 +3400,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, free(pool, M_CTL); goto bailout; } - mtx_lock(&ctl_softc->ctl_lock); + mtx_lock(&ctl_softc->pool_lock); ctl_softc->num_pools++; STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); /* @@ -3426,7 +3419,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, MOD_INC_USE_COUNT; #endif - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); *npool = pool; @@ -3435,14 +3428,11 @@ bailout: return (retval); } -int +static int ctl_pool_acquire(struct ctl_io_pool *pool) { - mtx_assert(&control_softc->ctl_lock, MA_OWNED); - - if (pool == NULL) - return (-EINVAL); + mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED); if (pool->flags & CTL_POOL_FLAG_INVALID) return (-EINVAL); @@ -3452,51 +3442,21 @@ ctl_pool_acquire(struct ctl_io_pool *pool) return (0); } -int -ctl_pool_invalidate(struct ctl_io_pool *pool) -{ - - mtx_assert(&control_softc->ctl_lock, MA_OWNED); - - if (pool == NULL) - return (-EINVAL); - - pool->flags |= CTL_POOL_FLAG_INVALID; - - return (0); -} - -int +static void ctl_pool_release(struct ctl_io_pool *pool) { + struct ctl_softc *ctl_softc = pool->ctl_softc; + union ctl_io *io; - mtx_assert(&control_softc->ctl_lock, MA_OWNED); - - if (pool == NULL) - return (-EINVAL); - - if ((--pool->refcount == 0) - && (pool->flags & CTL_POOL_FLAG_INVALID)) { - ctl_pool_free(pool->ctl_softc, pool); - } - - return (0); -} - -void -ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool) -{ - union ctl_io *cur_io, *next_io; + mtx_assert(&ctl_softc->pool_lock, MA_OWNED); - mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); + if (--pool->refcount != 0) + return; - for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); - cur_io != NULL; cur_io = next_io) { - next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, - links); - STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr, + while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) { + STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr, links); - free(cur_io, M_CTL); + free(io, M_CTL); } STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); @@ -3515,6 +3475,21 @@ ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool) free(pool, M_CTL); } +void +ctl_pool_free(struct ctl_io_pool *pool) +{ + struct ctl_softc *ctl_softc; + + if (pool == NULL) + return; + + ctl_softc = pool->ctl_softc; + mtx_lock(&ctl_softc->pool_lock); + pool->flags |= CTL_POOL_FLAG_INVALID; + ctl_pool_release(pool); + mtx_unlock(&ctl_softc->pool_lock); +} + /* * This routine does not block (except for spinlocks of course). * It tries to allocate a ctl_io union from the caller's pool as quickly as @@ -3539,7 +3514,7 @@ ctl_alloc_io(void *pool_ref) ctl_softc = pool->ctl_softc; - mtx_lock(&ctl_softc->ctl_lock); + mtx_lock(&ctl_softc->pool_lock); /* * First, try to get the io structure from the user's pool. 
*/ @@ -3549,7 +3524,7 @@ ctl_alloc_io(void *pool_ref) STAILQ_REMOVE_HEAD(&pool->free_queue, links); pool->total_allocated++; pool->free_ctl_io--; - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); return (io); } else ctl_pool_release(pool); @@ -3572,14 +3547,14 @@ ctl_alloc_io(void *pool_ref) STAILQ_REMOVE_HEAD(&npool->free_queue, links); npool->total_allocated++; npool->free_ctl_io--; - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); return (io); } else ctl_pool_release(npool); } /* Drop the spinlock before we malloc */ - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); /* * The emergency pool (if it exists) didn't have one, so try an @@ -3592,7 +3567,7 @@ ctl_alloc_io(void *pool_ref) * ctl_io to its list when it gets freed. */ if (emergency_pool != NULL) { - mtx_lock(&ctl_softc->ctl_lock); + mtx_lock(&ctl_softc->pool_lock); if (ctl_pool_acquire(emergency_pool) == 0) { io->io_hdr.pool = emergency_pool; emergency_pool->total_ctl_io++; @@ -3604,7 +3579,7 @@ ctl_alloc_io(void *pool_ref) */ emergency_pool->total_allocated++; } - mtx_unlock(&ctl_softc->ctl_lock); + mtx_unlock(&ctl_softc->pool_lock); } else io->io_hdr.pool = NULL; } @@ -3612,8 +3587,8 @@ ctl_alloc_io(void *pool_ref) return (io); } -static void -ctl_free_io_internal(union ctl_io *io, int have_lock) +void +ctl_free_io(union ctl_io *io) { if (io == NULL) return; @@ -3634,8 +3609,7 @@ ctl_free_io_internal(union ctl_io *io, int have_lock) pool = (struct ctl_io_pool *)io->io_hdr.pool; - if (have_lock == 0) - mtx_lock(&pool->ctl_softc->ctl_lock); + mtx_lock(&pool->ctl_softc->pool_lock); #if 0 save_flags(xflags); @@ -3672,8 +3646,7 @@ ctl_free_io_internal(union ctl_io *io, int have_lock) pool->total_freed++; pool->free_ctl_io++; ctl_pool_release(pool); - if (have_lock == 0) - mtx_unlock(&pool->ctl_softc->ctl_lock); + mtx_unlock(&pool->ctl_softc->pool_lock); } else { /* * Otherwise, just free it. 
We probably malloced it and @@ -3685,12 +3658,6 @@ ctl_free_io_internal(union ctl_io *io, int have_lock) } void -ctl_free_io(union ctl_io *io) -{ - ctl_free_io_internal(io, /*have_lock*/ 0); -} - -void ctl_zero_io(union ctl_io *io) { void *pool_ref; @@ -4496,7 +4463,7 @@ ctl_free_lun(struct ctl_lun *lun) io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); - ctl_free_io_internal(io, /*have_lock*/ 1); + ctl_free_io(io); } softc->num_luns--; @@ -10211,7 +10178,7 @@ ctl_failover(void) TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); - ctl_free_io_internal(io, 1); + ctl_free_io(io); } } @@ -10227,7 +10194,7 @@ ctl_failover(void) &io->io_hdr, ooa_links); - ctl_free_io_internal(io, 1); + ctl_free_io(io); } } ctl_check_blocked(lun); @@ -11118,7 +11085,7 @@ ctl_run_task_queue(struct ctl_softc *ctl_softc) io->taskio.tag_num : io->scsiio.tag_num); STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, ctl_io_hdr, links); - ctl_free_io_internal(io, 1); + ctl_free_io(io); break; } } @@ -11215,7 +11182,7 @@ ctl_handle_isc(union ctl_io *io) break; } if (free_io) - ctl_free_io_internal(io, 0); + ctl_free_io(io); } @@ -12363,7 +12330,7 @@ ctl_process_done(union ctl_io *io, int have_lock) case CTL_IO_TASK: ctl_io_error_print(io, NULL); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) - ctl_free_io_internal(io, /*have_lock*/ 0); + ctl_free_io(io); else fe_done(io); return (CTL_RETVAL_COMPLETE); @@ -12682,7 +12649,7 @@ ctl_process_done(union ctl_io *io, int have_lock) /* XXX do something here */ } - ctl_free_io_internal(io, /*have_lock*/ 0); + ctl_free_io(io); } else fe_done(io); diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c index 83042ee..2bc6ade 100644 --- a/sys/cam/ctl/ctl_frontend.c +++ b/sys/cam/ctl/ctl_frontend.c @@ -114,7 +114,6 @@ ctl_frontend_register(struct ctl_frontend *fe, int master_shelf) fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS); fe->max_initiators = CTL_MAX_INIT_PER_PORT; STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links); - ctl_pool_acquire(pool); control_softc->ctl_ports[port_num] = fe; mtx_unlock(&control_softc->ctl_lock); @@ -141,10 +140,6 @@ ctl_frontend_deregister(struct ctl_frontend *fe) } mtx_lock(&control_softc->ctl_lock); - - ctl_pool_invalidate(pool); - ctl_pool_release(pool); - STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links); control_softc->num_frontends--; port_num = (fe->targ_port < CTL_MAX_PORTS) ? 
fe->targ_port : @@ -152,6 +147,9 @@ ctl_frontend_deregister(struct ctl_frontend *fe) ctl_clear_mask(&control_softc->ctl_port_mask, port_num); control_softc->ctl_ports[port_num] = NULL; mtx_unlock(&control_softc->ctl_lock); + + ctl_pool_free(pool); + bailout: return (retval); } diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h index 77611276..d2035fb 100644 --- a/sys/cam/ctl/ctl_private.h +++ b/sys/cam/ctl/ctl_private.h @@ -448,6 +448,7 @@ struct ctl_softc { struct ctl_frontend *ctl_ports[CTL_MAX_PORTS]; uint32_t num_backends; STAILQ_HEAD(, ctl_backend_driver) be_list; + struct mtx pool_lock; uint32_t num_pools; uint32_t cur_pool_id; STAILQ_HEAD(, ctl_io_pool) io_pools; @@ -462,10 +463,7 @@ extern struct ctl_cmd_entry ctl_cmd_table[]; uint32_t ctl_get_initindex(struct ctl_nexus *nexus); int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, uint32_t total_ctl_io, struct ctl_io_pool **npool); -int ctl_pool_acquire(struct ctl_io_pool *pool); -int ctl_pool_invalidate(struct ctl_io_pool *pool); -int ctl_pool_release(struct ctl_io_pool *pool); -void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool); +void ctl_pool_free(struct ctl_io_pool *pool); int ctl_scsi_release(struct ctl_scsiio *ctsio); int ctl_scsi_reserve(struct ctl_scsiio *ctsio); int ctl_start_stop(struct ctl_scsiio *ctsio); |
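
For pool consumers, the net KPI change (visible in the ctl_private.h hunk
above) is that the acquire/invalidate/release trio is no longer exported and
ctl_pool_free() loses both its softc argument and its ctl_lock requirement:

/* before */
int  ctl_pool_acquire(struct ctl_io_pool *pool);
int  ctl_pool_invalidate(struct ctl_io_pool *pool);
int  ctl_pool_release(struct ctl_io_pool *pool);
void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool);

/* after */
void ctl_pool_free(struct ctl_io_pool *pool);

A teardown path such as ctl_frontend_deregister() therefore just drops
ctl_lock and calls ctl_pool_free(pool), as the ctl_frontend.c hunk shows.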