Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17..132a858 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -117,7 +117,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
*/
-static void clear_queue_congested(request_queue_t *q, int rw)
+void blk_clear_queue_congested(request_queue_t *q, int rw)
{
enum bdi_state bit;
wait_queue_head_t *wqh = &congestion_wqh[rw];
@@ -128,18 +128,20 @@ static void clear_queue_congested(request_queue_t *q, int rw)
if (waitqueue_active(wqh))
wake_up(wqh);
}
+EXPORT_SYMBOL(blk_clear_queue_congested);
/*
* A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
*/
-static void set_queue_congested(request_queue_t *q, int rw)
+void blk_set_queue_congested(request_queue_t *q, int rw)
{
enum bdi_state bit;
bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
set_bit(bit, &q->backing_dev_info.state);
}
+EXPORT_SYMBOL(blk_set_queue_congested);
/**
* blk_get_backing_dev_info - get the address of a queue's backing_dev_info
@@ -159,7 +161,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
ret = &q->backing_dev_info;
return ret;
}
-
EXPORT_SYMBOL(blk_get_backing_dev_info);
void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +168,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
q->activity_fn = fn;
q->activity_data = data;
}
-
EXPORT_SYMBOL(blk_queue_activity_fn);
/**
@@ -2067,7 +2067,7 @@ static void __freed_request(request_queue_t *q, int rw)
struct request_list *rl = &q->rq;
if (rl->count[rw] < queue_congestion_off_threshold(q))
- clear_queue_congested(q, rw);
+ blk_clear_queue_congested(q, rw);
if (rl->count[rw] + 1 <= q->nr_requests) {
if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2137,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
}
}
}
- set_queue_congested(q, rw);
+ blk_set_queue_congested(q, rw);
}
/*
@@ -3765,14 +3765,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
blk_queue_congestion_threshold(q);
if (rl->count[READ] >= queue_congestion_on_threshold(q))
- set_queue_congested(q, READ);
+ blk_set_queue_congested(q, READ);
else if (rl->count[READ] < queue_congestion_off_threshold(q))
- clear_queue_congested(q, READ);
+ blk_clear_queue_congested(q, READ);
if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
- set_queue_congested(q, WRITE);
+ blk_set_queue_congested(q, WRITE);
else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
- clear_queue_congested(q, WRITE);
+ blk_clear_queue_congested(q, WRITE);
if (rl->count[READ] >= q->nr_requests) {
blk_set_queue_full(q, READ);
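
With blk_set_queue_congested() and blk_clear_queue_congested() exported, code outside
ll_rw_blk.c can now flag a queue's congestion state to the VM. Below is a minimal
sketch of how a caller might use the pair; the driver structure (my_dev), its backlog
counters, and both threshold constants are illustrative assumptions, not part of this
patch.

#include <linux/blkdev.h>

#define MY_CONGESTED_ON		128	/* hypothetical "on" threshold */
#define MY_CONGESTED_OFF	120	/* hypothetical lower "off" threshold */

struct my_dev {
	request_queue_t *queue;
	int backlog[2];			/* outstanding READ/WRITE requests */
};

/*
 * Mirror the hysteresis used by __freed_request()/get_request() above:
 * mark the queue congested once the backlog reaches the "on" threshold,
 * and only clear it again after it drops below the lower "off" threshold,
 * so the congested bit does not flap around a single boundary value.
 */
static void my_dev_update_congestion(struct my_dev *md, int rw)
{
	if (md->backlog[rw] >= MY_CONGESTED_ON)
		blk_set_queue_congested(md->queue, rw);
	else if (md->backlog[rw] < MY_CONGESTED_OFF)
		blk_clear_queue_congested(md->queue, rw);
}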