author     mav <mav@FreeBSD.org>  2013-08-05 11:48:40 +0000
committer  mav <mav@FreeBSD.org>  2013-08-05 11:48:40 +0000
commit     bbfed93309ad0e43fcd90d67e5ba66ff86d113ff (patch)
tree       ad49b3f2fe36aff83e041ef6aa15d048ab38b893
parent     08061c3f1e43cdc5bba4b98a08da51432d3ba18c (diff)
MFprojects/camlock r249505:
Change CCB queue resize logic to be able to safely handle overallocations:
 - (re)allocate queue space in power-of-2 chunks with a 64-element minimum
   and never shrink it; at only 4/8 bytes per element the size is
   insignificant.
 - automatically reallocate the queue to double size if it overflows.
 - if queue reallocation fails, store extra CCBs in an unsorted TAILQ,
   fetching them back as soon as some queue element is freed.

To free space in the CCB for TAILQ linking, change highpowerq from keeping
high-power CCBs to keeping devices frozen due to high-power CCBs.

This encloses all pieces of the queue resize logic inside cam_queue.[ch],
removing some non-obvious duties from xpt_release_ccb().
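As a rough illustration of the sizing policy above (not part of the commit),
the userland sketch below applies the same imax(64, 1 << fls(n + n / 2))
expression that the patch adds to cam_ccbq_resize() and cam_ccbq_init().
fls_local() and imax_local() are local stand-ins for the kernel's
fls()/imax(); everything else here is illustrative only.

/*
 * Userland sketch of the sizing policy (illustration only): round the
 * requested queue size up to a power of two with 1.5x headroom and a
 * 64-entry floor, matching the expression added to cam_ccbq_resize().
 */
#include <stdio.h>

static int
fls_local(int mask)
{
        int bit;

        if (mask == 0)
                return (0);
        for (bit = 1; mask != 1; bit++)
                mask = (int)((unsigned int)mask >> 1);
        return (bit);
}

static int
imax_local(int a, int b)
{
        return (a > b ? a : b);
}

int
main(void)
{
        int sizes[] = { 1, 25, 64, 100, 200, 256 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int n = sizes[i];
                int alloc = imax_local(64, 1 << fls_local(n + n / 2));

                printf("requested %3d -> allocated %3d\n", n, alloc);
        }
        return (0);
}

For example, a request for 100 openings would be backed by a 256-entry array,
and a later request for 64 would leave the array alone, since the queue is
never shrunk.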
-rw-r--r--   sys/cam/cam.h                1
-rw-r--r--   sys/cam/cam_queue.c         31
-rw-r--r--   sys/cam/cam_queue.h         55
-rw-r--r--   sys/cam/cam_xpt.c           26
-rw-r--r--   sys/cam/cam_xpt_internal.h   2
5 files changed, 71 insertions, 44 deletions
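The cam_queue.h changes below implement the overflow path described in the
commit message: when the sized queue is full and cannot grow, the
lowest-priority CCB is parked on an unsorted TAILQ and fetched back once a
slot frees up. The standalone sketch below shows only that general idea;
struct entry, extraq_best(), and the priority ordering (lower value wins, as
with CCB pinfo.priority) are illustrative stand-ins, not CAM structures.

#include <sys/queue.h>
#include <stdio.h>

struct entry {
        int priority;                   /* lower value = more urgent */
        TAILQ_ENTRY(entry) link;
};

TAILQ_HEAD(extraq, entry);

/* Scan the unsorted extra queue for the most urgent parked entry. */
static struct entry *
extraq_best(struct extraq *eq)
{
        struct entry *e, *best;

        best = TAILQ_FIRST(eq);
        TAILQ_FOREACH(e, eq, link) {
                if (e->priority < best->priority)
                        best = e;
        }
        return (best);
}

int
main(void)
{
        struct extraq eq = TAILQ_HEAD_INITIALIZER(eq);
        struct entry a = { .priority = 5 }, b = { .priority = 1 };
        struct entry c = { .priority = 3 };
        struct entry *best;

        /* Three entries overflow into the unsorted extra queue. */
        TAILQ_INSERT_HEAD(&eq, &a, link);
        TAILQ_INSERT_HEAD(&eq, &b, link);
        TAILQ_INSERT_HEAD(&eq, &c, link);

        /* A slot in the main queue freed up: pull the best entry back. */
        best = extraq_best(&eq);
        TAILQ_REMOVE(&eq, best, link);
        printf("reinserting parked entry with priority %d\n", best->priority);
        return (0);
}

In the patch itself the main queue is a camq and parked CCBs are tagged with
CAM_EXTRAQ_INDEX, which is how cam_ccbq_remove_ccb() knows which list a CCB
must be removed from.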
diff --git a/sys/cam/cam.h b/sys/cam/cam.h
index 9e53206..a7d51db 100644
--- a/sys/cam/cam.h
+++ b/sys/cam/cam.h
@@ -88,6 +88,7 @@ typedef struct {
#define CAM_UNQUEUED_INDEX -1
#define CAM_ACTIVE_INDEX -2
#define CAM_DONEQ_INDEX -3
+#define CAM_EXTRAQ_INDEX INT_MAX
} cam_pinfo;
/*
diff --git a/sys/cam/cam_queue.c b/sys/cam/cam_queue.c
index 41d0167..daaa9b3 100644
--- a/sys/cam/cam_queue.c
+++ b/sys/cam/cam_queue.c
@@ -284,39 +284,24 @@ u_int32_t
cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size)
{
int delta;
- int space_left;
delta = new_size - (ccbq->dev_active + ccbq->dev_openings);
- space_left = new_size
- - ccbq->queue.entries
- - ccbq->held
- - ccbq->dev_active;
+ ccbq->devq_openings += delta;
+ ccbq->dev_openings += delta;
- /*
- * Only attempt to change the underlying queue size if we are
- * shrinking it and there is space for all outstanding entries
- * in the new array or we have been requested to grow the array.
- * We don't fail in the case where we can't reduce the array size,
- * but clients that care that the queue be "garbage collected"
- * should detect this condition and call us again with the
- * same size once the outstanding entries have been processed.
- */
- if (space_left < 0
- || camq_resize(&ccbq->queue, new_size + (CAM_RL_VALUES - 1)) ==
- CAM_REQ_CMP) {
- ccbq->devq_openings += delta;
- ccbq->dev_openings += delta;
+ new_size = imax(64, 1 << fls(new_size + new_size / 2));
+ if (new_size > ccbq->queue.array_size)
+ return (camq_resize(&ccbq->queue, new_size));
+ else
return (CAM_REQ_CMP);
- } else {
- return (CAM_RESRC_UNAVAIL);
- }
}
int
cam_ccbq_init(struct cam_ccbq *ccbq, int openings)
{
bzero(ccbq, sizeof(*ccbq));
- if (camq_init(&ccbq->queue, openings + (CAM_RL_VALUES - 1)) != 0)
+ if (camq_init(&ccbq->queue,
+ imax(64, 1 << fls(openings + openings / 2))) != 0)
return (1);
ccbq->devq_openings = openings;
ccbq->dev_openings = openings;
diff --git a/sys/cam/cam_queue.h b/sys/cam/cam_queue.h
index 9b27bae..0bb4491 100644
--- a/sys/cam/cam_queue.h
+++ b/sys/cam/cam_queue.h
@@ -57,6 +57,8 @@ SLIST_HEAD(ccb_hdr_slist, ccb_hdr);
struct cam_ccbq {
struct camq queue;
+ struct ccb_hdr_tailq queue_extra_head;
+ int queue_extra_entries;
int devq_openings;
int devq_allocating;
int dev_openings;
@@ -177,7 +179,7 @@ cam_ccbq_release_opening(struct cam_ccbq *ccbq);
static __inline int
cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq)
{
- return (ccbq->queue.entries);
+ return (ccbq->queue.entries + ccbq->queue_extra_entries);
}
static __inline void
@@ -190,14 +192,61 @@ cam_ccbq_take_opening(struct cam_ccbq *ccbq)
static __inline void
cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb)
{
+ struct ccb_hdr *old_ccb;
+ struct camq *queue = &ccbq->queue;
+
ccbq->held--;
- camq_insert(&ccbq->queue, &new_ccb->ccb_h.pinfo);
+
+ /*
+ * If queue is already full, try to resize.
+	 * If the resize fails, push the lowest-priority CCB out to the TAILQ.
+ */
+ if (queue->entries == queue->array_size &&
+ camq_resize(&ccbq->queue, queue->array_size * 2) != CAM_REQ_CMP) {
+ old_ccb = (struct ccb_hdr *)camq_remove(queue, queue->entries);
+ TAILQ_INSERT_HEAD(&ccbq->queue_extra_head, old_ccb,
+ xpt_links.tqe);
+ old_ccb->pinfo.index = CAM_EXTRAQ_INDEX;
+ ccbq->queue_extra_entries++;
+ }
+
+ camq_insert(queue, &new_ccb->ccb_h.pinfo);
}
static __inline void
cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb)
{
- camq_remove(&ccbq->queue, ccb->ccb_h.pinfo.index);
+ struct ccb_hdr *cccb, *bccb;
+ struct camq *queue = &ccbq->queue;
+
+ /* If the CCB is on the TAILQ, remove it from there. */
+ if (ccb->ccb_h.pinfo.index == CAM_EXTRAQ_INDEX) {
+ TAILQ_REMOVE(&ccbq->queue_extra_head, &ccb->ccb_h,
+ xpt_links.tqe);
+ ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
+ ccbq->queue_extra_entries--;
+ return;
+ }
+
+ camq_remove(queue, ccb->ccb_h.pinfo.index);
+
+ /*
+	 * If there are some CCBs on the TAILQ, find the best one and move it
+ * to the emptied space in the queue.
+ */
+ bccb = TAILQ_FIRST(&ccbq->queue_extra_head);
+ if (bccb == NULL)
+ return;
+ TAILQ_FOREACH(cccb, &ccbq->queue_extra_head, xpt_links.tqe) {
+ if (bccb->pinfo.priority > cccb->pinfo.priority ||
+ (bccb->pinfo.priority == cccb->pinfo.priority &&
+ GENERATIONCMP(bccb->pinfo.generation, >,
+ cccb->pinfo.generation)))
+ bccb = cccb;
+ }
+ TAILQ_REMOVE(&ccbq->queue_extra_head, bccb, xpt_links.tqe);
+ ccbq->queue_extra_entries--;
+ camq_insert(queue, &bccb->pinfo);
}
static __inline union ccb *
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index aac1be2..8246acd 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -99,7 +99,7 @@ struct xpt_softc {
u_int32_t xpt_generation;
/* number of high powered commands that can go through right now */
- STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
+ STAILQ_HEAD(highpowerlist, cam_ed) highpowerq;
int num_highpower;
/* queue for handling async rescan requests. */
@@ -2684,7 +2684,7 @@ xpt_action_default(union ccb *start_ccb)
cgds->dev_openings = dev->ccbq.dev_openings;
cgds->dev_active = dev->ccbq.dev_active;
cgds->devq_openings = dev->ccbq.devq_openings;
- cgds->devq_queued = dev->ccbq.queue.entries;
+ cgds->devq_queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
cgds->held = dev->ccbq.held;
cgds->last_reset = tar->last_reset;
cgds->maxtags = dev->maxtags;
@@ -3255,8 +3255,8 @@ xpt_run_devq(struct cam_devq *devq)
*/
xpt_freeze_devq(work_ccb->ccb_h.path, 1);
STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
- &work_ccb->ccb_h,
- xpt_links.stqe);
+ work_ccb->ccb_h.path->device,
+ highpowerq_entry);
mtx_unlock(&xsoftc.xpt_lock);
continue;
@@ -3778,11 +3778,6 @@ xpt_release_ccb(union ccb *free_ccb)
mtx_assert(sim->mtx, MA_OWNED);
cam_ccbq_release_opening(&device->ccbq);
- if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
- device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
- cam_ccbq_resize(&device->ccbq,
- device->ccbq.dev_openings + device->ccbq.dev_active);
- }
if (sim->ccb_count > sim->max_ccbs) {
xpt_free_ccb(free_ccb);
sim->ccb_count--;
@@ -4581,9 +4576,6 @@ xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
result = cam_ccbq_resize(&dev->ccbq, newopenings);
- if (result == CAM_REQ_CMP && (diff < 0)) {
- dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
- }
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
|| (dev->inq_flags & SID_CmdQue) != 0)
dev->tag_saved_openings = newopenings;
@@ -4965,12 +4957,12 @@ camisr_runqueue(void *V_queue)
if (ccb_h->flags & CAM_HIGH_POWER) {
struct highpowerlist *hphead;
- union ccb *send_ccb;
+ struct cam_ed *device;
mtx_lock(&xsoftc.xpt_lock);
hphead = &xsoftc.highpowerq;
- send_ccb = (union ccb *)STAILQ_FIRST(hphead);
+ device = STAILQ_FIRST(hphead);
/*
* Increment the count since this command is done.
@@ -4980,12 +4972,12 @@ camisr_runqueue(void *V_queue)
/*
* Any high powered commands queued up?
*/
- if (send_ccb != NULL) {
+ if (device != NULL) {
- STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
+ STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
mtx_unlock(&xsoftc.xpt_lock);
- xpt_release_devq(send_ccb->ccb_h.path,
+ xpt_release_devq_device(device,
/*count*/1, /*runqueue*/TRUE);
} else
mtx_unlock(&xsoftc.xpt_lock);
diff --git a/sys/cam/cam_xpt_internal.h b/sys/cam/cam_xpt_internal.h
index f58d120..70808ba 100644
--- a/sys/cam/cam_xpt_internal.h
+++ b/sys/cam/cam_xpt_internal.h
@@ -114,7 +114,6 @@ struct cam_ed {
#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
#define CAM_DEV_REL_ON_COMPLETE 0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
-#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
#define CAM_DEV_TAG_AFTER_COUNT 0x20
#define CAM_DEV_INQUIRY_DATA_VALID 0x40
#define CAM_DEV_IN_DV 0x80
@@ -125,6 +124,7 @@ struct cam_ed {
u_int32_t tag_saved_openings;
u_int32_t refcount;
struct callout callout;
+ STAILQ_ENTRY(cam_ed) highpowerq_entry;
};
/*