path: root/sys/kern/subr_disk.c
author		phk <phk@FreeBSD.org>	2002-09-20 14:14:37 +0000
committer	phk <phk@FreeBSD.org>	2002-09-20 14:14:37 +0000
commit		7fd38535eae3b6a4d4995a5d64312946ac90c975 (patch)
tree		3a943971e7e89a1029824f9f9d07d4300238f212 /sys/kern/subr_disk.c
parent		868209c8920da6141c8bdc1603136cfebd8b224f (diff)
Make FreeBSD "struct disklabel" agnostic, step 312 of 723:
Rename bioqdisksort() to bioq_disksort().  Keep a #define around to
avoid changing all disk drivers right now.

Move it from subr_disklabel.c to subr_disk.c.

Move the prototype from <sys/disklabel.h> to <sys/bio.h>.

Sponsored by:	DARPA and NAI Labs.
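For illustration, the compatibility shim mentioned above amounts to a
one-line macro.  A minimal sketch, assuming the old name simply forwards
its arguments to the new function; the exact definition placed in
<sys/bio.h> may differ:

	/* Transitional alias: lets unconverted disk drivers keep
	 * compiling until they are switched to the new name. */
	#define bioqdisksort(bioq, bp)	bioq_disksort(bioq, bp)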
Diffstat (limited to 'sys/kern/subr_disk.c')
-rw-r--r--	sys/kern/subr_disk.c	150
1 file changed, 150 insertions(+), 0 deletions(-)
diff --git a/sys/kern/subr_disk.c b/sys/kern/subr_disk.c
index 801eaaf..388ea79 100644
--- a/sys/kern/subr_disk.c
+++ b/sys/kern/subr_disk.c
@@ -469,3 +469,153 @@ disk_err(struct bio *bp, const char *what, int blkdone, int nl)
if (nl)
printf("\n");
}
+
+#ifdef notquite
+/*
+ * Mutex to use when delaying niced I/O-bound processes in bioq_disksort().
+ */
+static struct mtx dksort_mtx;
+static void
+dksort_init(void)
+{
+
+ mtx_init(&dksort_mtx, "dksort", NULL, MTX_DEF);
+}
+SYSINIT(dksort, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, dksort_init, NULL)
+#endif
+
+/*
+ * Seek sort for disks.
+ *
+ * The buf_queue keeps two queues, sorted in ascending block order. The first
+ * queue holds those requests which are positioned after the current block
+ * (in the first request); the second, which starts at queue->switch_point,
+ * holds requests which came in after their block number was passed. Thus
+ * we implement a one-way scan, retracting after reaching the end of the drive
+ * to the first request on the second queue, at which time it becomes the
+ * first queue.
+ *
+ * A one-way scan is natural because of the way UNIX read-ahead blocks are
+ * allocated.
+ */
+
+void
+bioq_disksort(bioq, bp)
+ struct bio_queue_head *bioq;
+ struct bio *bp;
+{
+ struct bio *bq;
+ struct bio *bn;
+ struct bio *be;
+
+#ifdef notquite
+ struct thread *td = curthread;
+
+ if (td && td->td_ksegrp->kg_nice > 0) {
+ TAILQ_FOREACH(bn, &bioq->queue, bio_queue)
+ if (BIOTOBUF(bp)->b_vp != BIOTOBUF(bn)->b_vp)
+ break;
+ if (bn != NULL) {
+ mtx_lock(&dksort_mtx);
+ msleep(&dksort_mtx, &dksort_mtx,
+ PPAUSE | PCATCH | PDROP, "ioslow",
+ td->td_ksegrp->kg_nice);
+ }
+ }
+#endif
+ if (!atomic_cmpset_int(&bioq->busy, 0, 1))
+ panic("Recursing in bioq_disksort()");
+ be = TAILQ_LAST(&bioq->queue, bio_queue);
+ /*
+ * If the queue is empty or we are an
+ * ordered transaction, then it's easy.
+ */
+ if ((bq = bioq_first(bioq)) == NULL) {
+ bioq_insert_tail(bioq, bp);
+ bioq->busy = 0;
+ return;
+ } else if (bioq->insert_point != NULL) {
+
+ /*
+ * A certain portion of the list is
+ * "locked" to preserve ordering, so
+ * we can only insert after the insert
+ * point.
+ */
+ bq = bioq->insert_point;
+ } else {
+
+ /*
+ * If we lie before the last removed (currently active)
+ * request, and are not inserting ourselves into the
+ * "locked" portion of the list, then we must add ourselves
+ * to the second request list.
+ */
+ if (bp->bio_pblkno < bioq->last_pblkno) {
+
+ bq = bioq->switch_point;
+ /*
+ * If we are starting a new secondary list,
+ * then it's easy.
+ */
+ if (bq == NULL) {
+ bioq->switch_point = bp;
+ bioq_insert_tail(bioq, bp);
+ bioq->busy = 0;
+ return;
+ }
+ /*
+ * If we lie ahead of the current switch point,
+ * insert us before the switch point and move
+ * the switch point.
+ */
+ if (bp->bio_pblkno < bq->bio_pblkno) {
+ bioq->switch_point = bp;
+ TAILQ_INSERT_BEFORE(bq, bp, bio_queue);
+ bioq->busy = 0;
+ return;
+ }
+ } else {
+ if (bioq->switch_point != NULL)
+ be = TAILQ_PREV(bioq->switch_point,
+ bio_queue, bio_queue);
+ /*
+ * If we lie between last_pblkno and bq,
+ * insert before bq.
+ */
+ if (bp->bio_pblkno < bq->bio_pblkno) {
+ TAILQ_INSERT_BEFORE(bq, bp, bio_queue);
+ bioq->busy = 0;
+ return;
+ }
+ }
+ }
+
+ /*
+ * Request is at/after our current position in the list.
+	 * Optimize for sequential I/O by checking whether we belong at the tail.
+ */
+ if (bp->bio_pblkno > be->bio_pblkno) {
+ TAILQ_INSERT_AFTER(&bioq->queue, be, bp, bio_queue);
+ bioq->busy = 0;
+ return;
+ }
+
+ /* Otherwise, insertion sort */
+ while ((bn = TAILQ_NEXT(bq, bio_queue)) != NULL) {
+
+ /*
+ * We want to go after the current request if it is the end
+	 * of the first request list, or if the next request is at
+	 * a higher block number than our request.
+ */
+ if (bn == bioq->switch_point
+ || bp->bio_pblkno < bn->bio_pblkno)
+ break;
+ bq = bn;
+ }
+ TAILQ_INSERT_AFTER(&bioq->queue, bq, bp, bio_queue);
+ bioq->busy = 0;
+}
+
+
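For context, here is how the algorithm above behaves on concrete input,
followed by a consumer sketch.  Suppose last_pblkno is 500 and the first
queue holds requests for blocks 600 and 700: a new request for block 650
is insertion-sorted between them, while a request for block 100 lies
behind the current head position and therefore starts (or joins) the
second queue at switch_point.  The sketch below shows how a disk driver
typically feeds and drains such a queue; it is an editor's illustration,
not part of this commit.  xxstrategy(), xxstart(), xx_softc and
xx_softc_of() are made-up names; bioq_first() and bioq_remove() are the
<sys/bio.h> helpers of this era, and bioq_remove() is assumed to update
last_pblkno and switch_point as it dequeues.

	#include <sys/param.h>
	#include <sys/bio.h>

	struct xx_softc {
		struct bio_queue_head bioq;	/* per-device request queue */
		/* ... controller state ... */
	};

	static struct xx_softc *xx_softc_of(struct bio *bp);	/* made up */
	static void xxstart(struct xx_softc *sc);

	static void
	xxstrategy(struct bio *bp)
	{
		struct xx_softc *sc = xx_softc_of(bp);

		/* Sort the request into the one-way elevator queue... */
		bioq_disksort(&sc->bioq, bp);
		/* ...then make sure the hardware is working on it. */
		xxstart(sc);
	}

	static void
	xxstart(struct xx_softc *sc)
	{
		struct bio *bp;

		/* Take the next request in scan order off the queue. */
		bp = bioq_first(&sc->bioq);
		if (bp == NULL)
			return;
		bioq_remove(&sc->bioq, bp);
		/* ... program the controller with bp here ... */
	}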