Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c        |  26
-rw-r--r--  block/blktrace.c          |  30
-rw-r--r--  block/bsg.c               |  12
-rw-r--r--  block/cfq-iosched.c       |  39
-rw-r--r--  block/deadline-iosched.c  |  18
-rw-r--r--  block/elevator.c          |  75
-rw-r--r--  block/ll_rw_blk.c         | 215
-rw-r--r--  block/noop-iosched.c      |  14
-rw-r--r--  block/scsi_ioctl.c        |  24
9 files changed, 236 insertions(+), 217 deletions(-)
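
Every hunk below applies the same mechanical change: the request_queue_t typedef is
written out as struct request_queue in function signatures and local declarations
(the blktrace.c hunks additionally move from sched_clock() to cpu_clock() and pass
the CPU number explicitly). As a minimal sketch of the signature pattern only, not
part of this diff, using a hypothetical elevator callback and data structure name
(and assuming the usual elevator fields from the kernel headers), the conversion
looks like:

        /* before: the callback takes the typedef */
        static int sketch_queue_empty(request_queue_t *q)
        {
                struct sketch_data *sd = q->elevator->elevator_data;

                return list_empty(&sd->fifo_list);
        }

        /* after: struct request_queue is named directly; the body is unchanged */
        static int sketch_queue_empty(struct request_queue *q)
        {
                struct sketch_data *sd = q->elevator->elevator_data;

                return list_empty(&sd->fifo_list);
        }
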
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd..dc715a5 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
* as_completed_request is to be called when a request has completed and
* returned something to the requesting process, be it an error or data.
*/
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -853,7 +853,8 @@ out:
* reference unless it replaces the request at somepart of the elevator
* (ie. the dispatch queue)
*/
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+ struct request *rq)
{
const int data_dir = rq_is_sync(rq);
struct as_data *ad = q->elevator->elevator_data;
@@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* read/write expire, batch expire, etc, and moves it to the dispatch
* queue. Returns 1 if a request was found, 0 otherwise.
*/
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
{
struct as_data *ad = q->elevator->elevator_data;
const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@@ -1139,7 +1140,7 @@ fifo_expired:
/*
* add rq to rbtree and fifo
*/
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
int data_dir;
@@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
RQ_SET_STATE(rq, AS_RQ_REMOVED);
@@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
* is not empty - it is used in the block layer to check for plugging and
* merging opportunities
*/
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
}
static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+ int type)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
}
}
-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
@@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
{
struct as_data *ad;
diff --git a/block/blktrace.c b/block/blktrace.c
index 3f0e7c3..20fa034 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -41,7 +41,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
const int cpu = smp_processor_id();
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
- t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->device = bt->dev;
t->action = action;
t->pid = pid;
@@ -159,7 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->sequence = ++(*sequence);
- t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->sector = sector;
t->bytes = bytes;
t->action = what;
@@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
kfree(bt);
}
-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
@@ -401,7 +401,7 @@ err:
return ret;
}
-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
int ret;
@@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
**/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
- request_queue_t *q;
+ struct request_queue *q;
int ret, start = 0;
q = bdev_get_queue(bdev);
@@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
* @q: the request queue associated with the device
*
**/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
{
if (q->blk_trace) {
blk_trace_startstop(q, 0);
@@ -488,17 +488,17 @@ void blk_trace_shutdown(request_queue_t *q)
}
/*
- * Average offset over two calls to sched_clock() with a gettimeofday()
+ * Average offset over two calls to cpu_clock() with a gettimeofday()
* in the middle
*/
-static void blk_check_time(unsigned long long *t)
+static void blk_check_time(unsigned long long *t, int this_cpu)
{
unsigned long long a, b;
struct timeval tv;
- a = sched_clock();
+ a = cpu_clock(this_cpu);
do_gettimeofday(&tv);
- b = sched_clock();
+ b = cpu_clock(this_cpu);
*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
*t -= (a + b) / 2;
@@ -510,16 +510,16 @@ static void blk_check_time(unsigned long long *t)
static void blk_trace_check_cpu_time(void *data)
{
unsigned long long *t;
- int cpu = get_cpu();
+ int this_cpu = get_cpu();
- t = &per_cpu(blk_trace_cpu_offset, cpu);
+ t = &per_cpu(blk_trace_cpu_offset, this_cpu);
/*
* Just call it twice, hopefully the second call will be cache hot
* and a little more precise
*/
- blk_check_time(t);
- blk_check_time(t);
+ blk_check_time(t, this_cpu);
+ blk_check_time(t, this_cpu);
put_cpu();
}
diff --git a/block/bsg.c b/block/bsg.c
index 12c287b..d60eee5 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -31,7 +31,7 @@
#define BSG_VERSION "0.4"
struct bsg_device {
- request_queue_t *queue;
+ struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list;
struct list_head done_list;
@@ -172,7 +172,7 @@ unlock:
return ret;
}
-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -206,7 +206,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
@@ -242,7 +242,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
@@ -337,7 +337,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
rq->sense = bc->sense;
@@ -603,7 +603,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
bc = NULL;
ret = 0;
while (nr_commands) {
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d148ccb..54dc054 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,7 +71,7 @@ struct cfq_rb_root {
* Per block device queue structure
*/
struct cfq_data {
- request_queue_t *queue;
+ struct request_queue *queue;
/*
* rr list of queues with requests and the count of them
@@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
kblockd_schedule_work(&cfqd->unplug_work);
}
-static int cfq_queue_empty(request_queue_t *q)
+static int cfq_queue_empty(struct request_queue *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
return NULL;
}
-static void cfq_activate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
}
}
-static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
@@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-static void cfq_merged_request(request_queue_t *q, struct request *req,
+static void cfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
@@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
}
static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
+cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
/*
@@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
cfq_remove_request(next);
}
-static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* Move request from internal lists to the request queue dispatch list.
*/
-static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
-static int cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
@@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
struct cfq_data *cfqd = cic->key;
if (cfqd) {
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__cfq_exit_single_io_context(cfqd, cic);
@@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
-static void cfq_insert_request(request_queue_t *q, struct request *rq)
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
cfq_rq_enqueued(cfqd, cfqq, rq);
}
-static void cfq_completed_request(request_queue_t *q, struct request *rq)
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
@@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
static void cfq_exit_queue(elevator_t *e)
{
struct cfq_data *cfqd = e->elevator_data;
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
cfq_shutdown_timer_wq(cfqd);
@@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q)
+static void *cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 87ca02a..1a511ff 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* remove rq from rbtree and fifo.
*/
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
}
static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
@@ -144,8 +144,8 @@ out:
return ret;
}
-static void deadline_merged_request(request_queue_t *q, struct request *req,
- int type)
+static void deadline_merged_request(struct request_queue *q,
+ struct request *req, int type)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
}
static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
+deadline_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
@@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static int deadline_dispatch_requests(request_queue_t *q, int force)
+static int deadline_dispatch_requests(struct request_queue *q, int force)
{
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
@@ -335,7 +335,7 @@ dispatch_request:
return 1;
}
-static int deadline_queue_empty(request_queue_t *q)
+static int deadline_queue_empty(struct request_queue *q)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(request_queue_t *q)
+static void *deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
diff --git a/block/elevator.c b/block/elevator.c
index d265963..c6d153d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
*/
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
elevator_t *e = q->elevator;
if (e->ops->elevator_allow_merge_fn)
@@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(struct request_queue *q,
+ struct elevator_queue *eq)
{
return eq->ops->elevator_init_fn(q);
}
-static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
void *data)
{
q->elevator = eq;
@@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;
-static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+static elevator_t *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
{
elevator_t *eq;
int i;
@@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-int elevator_init(request_queue_t *q, char *name)
+int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
struct elevator_queue *eq;
@@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(request_queue_t *q, struct request *rq)
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
e->ops->elevator_activate_req_fn(q, rq);
}
-static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
hlist_del_init(&rq->hash);
}
-static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
-static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}
-static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
-static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
elevator_t *e = q->elevator;
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
@@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
* entry. rq is sort insted into the dispatch queue. To be used by
* specific elevators.
*/
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
@@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL(elv_dispatch_add_tail);
-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
struct request *__rq;
@@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-void elv_merged_request(request_queue_t *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
@@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
q->last_merge = rq;
}
-void elv_merge_requests(request_queue_t *q, struct request *rq,
+void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
elevator_t *e = q->elevator;
@@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
q->last_merge = rq;
}
-void elv_requeue_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
@@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
-static void elv_drain_elevator(request_queue_t *q)
+static void elv_drain_elevator(struct request_queue *q)
{
static int printed;
while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
}
}
-void elv_insert(request_queue_t *q, struct request *rq, int where)
+void elv_insert(struct request_queue *q, struct request *rq, int where)
{
struct list_head *pos;
unsigned ordseq;
@@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
}
}
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
if (q->ordcolor)
@@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(__elv_add_request);
-void elv_add_request(request_queue_t *q, struct request *rq, int where,
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
unsigned long flags;
@@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(request_queue_t *q)
+static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
@@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
}
}
-struct request *elv_next_request(request_queue_t *q)
+struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
int ret;
@@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)
EXPORT_SYMBOL(elv_next_request);
-void elv_dequeue_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
@@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_dequeue_request);
-int elv_queue_empty(request_queue_t *q)
+int elv_queue_empty(struct request_queue *q)
{
elevator_t *e = q->elevator;
@@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)
EXPORT_SYMBOL(elv_queue_empty);
-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
return NULL;
}
-struct request *elv_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
-int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
@@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
return 0;
}
-void elv_put_request(request_queue_t *q, struct request *rq)
+void elv_put_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(rq);
}
-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(struct request_queue *q, int rw)
{
elevator_t *e = q->elevator;
@@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
return ELV_MQUEUE_MAY;
}
-void elv_completed_request(request_queue_t *q, struct request *rq)
+void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
* need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason.
*/
-static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
void *data;
@@ -1078,7 +1080,8 @@ fail_register:
return 0;
}
-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
{
char elevator_name[ELV_NAME_MAX];
size_t len;
@@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
return count;
}
-ssize_t elv_iosched_show(request_queue_t *q, char *name)
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
elevator_t *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
@@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
-struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_former_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbprev = rb_prev(&rq->rb_node);
@@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_rb_former_request);
-struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_latter_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbnext = rb_next(&rq->rb_node);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 66056ca..8c2caff 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
-static int __make_request(request_queue_t *q, struct bio *bio);
+static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
@@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
struct backing_dev_info *ret = NULL;
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
if (q)
ret = &q->backing_dev_info;
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
* cdb from the request data for instance.
*
*/
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
@@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
* no merge_bvec_fn is defined for a queue, and only the fixed limits are
* honored.
*/
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
-void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
{
/*
* set defaults
@@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
EXPORT_SYMBOL(blk_queue_make_request);
-static void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(struct request_queue *q, struct request *rq)
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
@@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
* feature should call this function and indicate so.
*
**/
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
@@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
* to the block layer by defining it through this call.
*
**/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
{
q->issue_flush_fn = iff;
}
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
/*
* Cache flushing for ordered writes handling
*/
-inline unsigned blk_ordered_cur_seq(request_queue_t *q)
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
@@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
unsigned blk_ordered_req_seq(struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
BUG_ON(q->ordseq == 0);
@@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
return QUEUE_ORDSEQ_DONE;
}
-void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
int uptodate;
@@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error)
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
-static void queue_flush(request_queue_t *q, unsigned which)
+static void queue_flush(struct request_queue *q, unsigned which)
{
struct request *rq;
rq_end_io_fn *end_io;
@@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
-static inline struct request *start_ordered(request_queue_t *q,
+static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
@@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q,
return rq;
}
-int blk_do_ordered(request_queue_t *q, struct request **rqp)
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
@@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
- request_queue_t *q = bio->bi_private;
+ struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
@@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
@@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @page.
**/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
int dma = 0;
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Enables a low level driver to set an upper limit on the size of
* received requests.
**/
-void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
* physical data segments in a request. This would be the largest sized
* scatter list the driver could handle.
**/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
@@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
* address/length pairs the host adapter can actually give as once
* to the device.
**/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_hw_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
@@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
* Enables a low level driver to set an upper limit on the size of a
* coalesced segment
**/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
@@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
* even internal read-modify-write operations). Usually the default
* of 512 covers most hardware.
**/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
q->hardsect_size = size;
}
@@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
* @t: the stacking driver (top)
* @b: the underlying device (bottom)
**/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
@@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
* this is used when buiding direct io requests for the queue.
*
**/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
q->dma_alignment = mask;
}
@@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
*
* no locks need be held.
**/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
return blk_map_queue_find_tag(q->queue_tags, tag);
}
@@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
* blk_cleanup_queue() will take care of calling this function, if tagging
* has been used. So there's no need to call this directly.
**/
-static void __blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
@@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags);
* This is used to disabled tagged queuing to a device, yet leave
* queue in function.
**/
-void blk_queue_free_tags(request_queue_t *q)
+void blk_queue_free_tags(struct request_queue *q)
{
clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
@@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q)
EXPORT_SYMBOL(blk_queue_free_tags);
static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
struct request **tag_index;
unsigned long *tag_map;
@@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags);
* @depth: the maximum queue depth supported
* @tags: the tag to use
**/
-int blk_queue_init_tags(request_queue_t *q, int depth,
+int blk_queue_init_tags(struct request_queue *q, int depth,
struct blk_queue_tag *tags)
{
int rc;
@@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags);
* Notes:
* Must be called with the queue lock held.
**/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct request **tag_index;
@@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
* Notes:
* queue lock must be held.
**/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag = rq->tag;
@@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
* Notes:
* queue lock must be held.
**/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag;
@@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag);
* Notes:
* queue lock must be held.
**/
-void blk_queue_invalidate_tags(request_queue_t *q)
+void blk_queue_invalidate_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct list_head *tmp, *n;
@@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
EXPORT_SYMBOL(blk_dump_rq_flags);
-void blk_recount_segments(request_queue_t *q, struct bio *bio)
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
@@ -1267,7 +1269,7 @@ new_hw_segment:
}
EXPORT_SYMBOL(blk_recount_segments);
-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
return 0;
}
-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
*/
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
@@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg);
* specific ones if so desired
*/
-static inline int ll_new_mergeable(request_queue_t *q,
+static inline int ll_new_mergeable(struct request_queue *q,
struct request *req,
struct bio *bio)
{
@@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
return 1;
}
-static inline int ll_new_hw_segment(request_queue_t *q,
+static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
{
@@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
return 1;
}
-int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
+int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
{
unsigned short max_sectors;
int len;
@@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
}
EXPORT_SYMBOL(ll_back_merge_fn);
-static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
@@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
return ll_new_hw_segment(q, req, bio);
}
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
@@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
* This is called with interrupts off and no requests on the queue and
* with the queue lock held.
*/
-void blk_plug_device(request_queue_t *q)
+void blk_plug_device(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device);
* remove the queue from the plugged list, if present. called with
* queue lock held and interrupts disabled.
*/
-int blk_remove_plug(request_queue_t *q)
+int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug);
/*
* remove the plug and let it rip..
*/
-void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
@@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
/**
* generic_unplug_device - fire a request queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* Linux uses plugging to build bigger requests queues before letting
@@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
* gets unplugged, the request_fn defined for the queue is invoked and
* transfers started.
**/
-void generic_unplug_device(request_queue_t *q)
+void generic_unplug_device(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
__generic_unplug_device(q);
@@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device);
static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
struct page *page)
{
- request_queue_t *q = bdi->unplug_io_data;
+ struct request_queue *q = bdi->unplug_io_data;
/*
* devices don't necessarily have an ->unplug_fn defined
@@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+ struct request_queue *q =
+ container_of(work, struct request_queue, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work)
static void blk_unplug_timeout(unsigned long data)
{
- request_queue_t *q = (request_queue_t *)data;
+ struct request_queue *q = (struct request_queue *)data;
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data)
/**
* blk_start_queue - restart a previously stopped queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue(). Queue lock must be held.
**/
-void blk_start_queue(request_queue_t *q)
+void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue);
/**
* blk_stop_queue - stop a queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* The Linux block layer assumes that a block driver will consume all
@@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue);
* the driver has signalled it's ready to go again. This happens by calling
* blk_start_queue() to restart queue operations. Queue lock must be held.
**/
-void blk_stop_queue(request_queue_t *q)
+void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
@@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_run_queue);
/**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging of the request queue to be released
*
* Description:
@@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue);
**/
static void blk_release_queue(struct kobject *kobj)
{
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
struct request_list *rl = &q->rq;
blk_sync_queue(q);
@@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj)
kmem_cache_free(requestq_cachep, q);
}
-void blk_put_queue(request_queue_t *q)
+void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
-void blk_cleanup_queue(request_queue_t * q)
+void blk_cleanup_queue(struct request_queue * q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q)
EXPORT_SYMBOL(blk_cleanup_queue);
-static int blk_init_free_list(request_queue_t *q)
+static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;
@@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q)
return 0;
}
-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
}
@@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue);
static struct kobj_type queue_ktype;
-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
- request_queue_t *q;
+ struct request_queue *q;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
@@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
* when the block device is deactivated (such as at module unload).
**/
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
-request_queue_t *
+struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
- request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
if (!q)
return NULL;
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
}
EXPORT_SYMBOL(blk_init_queue_node);
-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
{
if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
kobject_get(&q->kobj);
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q)
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq);
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
}
static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
*/
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc)
return 0;
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}
-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
{
struct request_list *rl = &q->rq;
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
{
struct request_list *rl = &q->rq;
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
@@ -2162,7 +2167,7 @@ out:
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct bio *bio)
{
const int rw = rw_flags & 0x01;
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
return rq;
}
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
struct request *rq;
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request);
*
* The queue lock must be held with interrupts disabled.
*/
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
{
if (!blk_queue_plugged(q))
q->request_fn(q);
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing);
* more, when that condition happens we need to put the request back
* on the queue. Must be called with queue lock held.
*/
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request);
* of the queue for things like a QUEUE_FULL message from a device, or a
* host that is unable to accept a particular command.
*/
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
int at_head, void *data)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
@@ -2403,8 +2408,8 @@ unmap_bio:
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+ void __user *ubuf, unsigned long len)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @len: length of user data
* @gfp_mask: memory allocation flags
*/
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
* Insert a fully prepared request at the back of the io scheduler queue
* for execution. Don't wait for completion.
*/
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
rq_end_io_fn *done)
{
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
* Insert a fully prepared request at the back of the io scheduler queue
* for execution and wait for completion.
*/
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq);
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
- request_queue_t *q;
+ struct request_queue *q;
if (bdev->bd_disk == NULL)
return -ENXIO;
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
* queue lock is held and interrupts disabled, as we muck with the
* request queue list.
*/
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
{
drive_stat_acct(req, req->nr_sectors, 1);
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
/*
* queue lock must be held
*/
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
{
if (unlikely(!q))
return;
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
unsigned long flags;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
/*
* Gee, IDE calls in w/ NULL q. Fix IDE and remove the
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
/*
* Has to be called with the request spinlock acquired
*/
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
return 1;
}
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
return 0;
}
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
req->start_time = jiffies;
}
-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
int el_ret, nr_sectors, barrier, err;
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio)
*/
static inline void __generic_make_request(struct bio *bio)
{
- request_queue_t *q;
+ struct request_queue *q;
sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq)
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
if (!rq->bio)
return;
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate)
EXPORT_SYMBOL(end_request);
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void)
sizeof(struct request), 0, SLAB_PANIC, NULL);
requestq_cachep = kmem_cache_create("blkdev_queue",
- sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
+ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
iocontext_cachep = kmem_cache_create("blkdev_ioc",
sizeof(struct io_context), 0, SLAB_PANIC, NULL);
@@ -4021,7 +4029,8 @@ static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
ssize_t res;
if (!entry->show)
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q = container_of(kobj, struct request_queue, kobj);
ssize_t res;
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk)
{
int ret;
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (!q || !q->request_fn)
return -ENXIO;
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk)
void blk_unregister_queue(struct gendisk *disk)
{
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (q && q->request_fn) {
elv_unregister_queue(q);
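
For context on the ll_rw_blk.c hunks above: request_queue_t has been a plain typedef for struct request_queue in <linux/blkdev.h>, so every conversion in this patch only changes the spelling of the parameter or variable type; no behaviour changes. A minimal sketch of the before/after pattern (foo_dispatch is an invented name used only for illustration, not part of the patch):

/* The old spelling relied on this typedef from <linux/blkdev.h>: */
struct request_queue;
typedef struct request_queue request_queue_t;	/* old alias being phased out */

/* Before the patch, prototypes used the typedef ... */
static int foo_dispatch_old(request_queue_t *q, int force);

/* ... after it, they name the struct directly, with no other change: */
static int foo_dispatch_new(struct request_queue *q, int force);
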
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 1c3de2b..7563d8a 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -11,13 +11,13 @@ struct noop_data {
struct list_head queue;
};
-static void noop_merged_requests(request_queue_t *q, struct request *rq,
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
list_del_init(&next->queuelist);
}
-static int noop_dispatch(request_queue_t *q, int force)
+static int noop_dispatch(struct request_queue *q, int force)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force)
return 0;
}
-static void noop_add_request(request_queue_t *q, struct request *rq)
+static void noop_add_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
list_add_tail(&rq->queuelist, &nd->queue);
}
-static int noop_queue_empty(request_queue_t *q)
+static int noop_queue_empty(struct request_queue *q)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q)
}
static struct request *
-noop_former_request(request_queue_t *q, struct request *rq)
+noop_former_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq)
}
static struct request *
-noop_latter_request(request_queue_t *q, struct request *rq)
+noop_latter_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(request_queue_t *q)
+static void *noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
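
The noop hooks converted above are wired into the block layer through an elevator_type table at the bottom of noop-iosched.c; that table is untouched by this patch because it only stores function pointers. Roughly, as a sketch of the 2.6.22-era registration block rather than a quote from the diff:

static struct elevator_type elevator_noop = {
	.ops = {
		.elevator_merge_req_fn		= noop_merged_requests,
		.elevator_dispatch_fn		= noop_dispatch,
		.elevator_add_req_fn		= noop_add_request,
		.elevator_queue_empty_fn	= noop_queue_empty,
		.elevator_former_req_fn		= noop_former_request,
		.elevator_latter_req_fn		= noop_latter_request,
		.elevator_init_fn		= noop_init_queue,
		.elevator_exit_fn		= noop_exit_queue,
	},
	.elevator_name = "noop",
	.elevator_owner = THIS_MODULE,
};

All of these hooks now take struct request_queue * (or no queue at all), which is why the whole file converts cleanly.
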
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d359a71..91c7322 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p)
return put_user(sg_version_num, p);
}
-static int scsi_get_idlun(request_queue_t *q, int __user *p)
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int scsi_get_bus(request_queue_t *q, int __user *p)
+static int scsi_get_bus(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int sg_get_timeout(request_queue_t *q)
+static int sg_get_timeout(struct request_queue *q)
{
return q->sg_timeout / (HZ / USER_HZ);
}
-static int sg_set_timeout(request_queue_t *q, int __user *p)
+static int sg_set_timeout(struct request_queue *q, int __user *p)
{
int timeout, err = get_user(timeout, p);
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
return err;
}
-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
return put_user(val, p);
}
-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
int size, err = get_user(size, p);
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p)
* will always return that we are ATAPI even for a real SCSI drive, I'm not
* so sure this is worth doing anything about (why would you care??)
*/
-static int sg_emulated_host(request_queue_t *q, int __user *p)
+static int sg_emulated_host(struct request_queue *q, int __user *p)
{
return put_user(1, p);
}
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
}
EXPORT_SYMBOL_GPL(blk_verify_command);
-static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return r;
}
-static int sg_io(struct file *file, request_queue_t *q,
+static int sg_io(struct file *file, struct request_queue *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
unsigned long start_time;
@@ -519,7 +519,8 @@ error:
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
/* Send basic block requests */
-static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+ int cmd, int data)
{
struct request *rq;
int err;
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
return err;
}
-static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data)
+static inline int blk_send_start_stop(struct request_queue *q,
+ struct gendisk *bd_disk, int data)
{
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}