Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c    |  2
-rw-r--r--  block/blk-core.c      | 68
-rw-r--r--  block/blk-ioc.c       |  2
-rw-r--r--  block/blk-merge.c     | 12
-rw-r--r--  block/blk-settings.c  |  8
-rw-r--r--  block/blk-sysfs.c     |  6
-rw-r--r--  block/blk-tag.c       |  9
-rw-r--r--  block/blktrace.c      | 31
-rw-r--r--  block/bsg.c           | 12
-rw-r--r--  block/cfq-iosched.c   | 57
-rw-r--r--  block/compat_ioctl.c  |  2
-rw-r--r--  block/elevator.c      |  2
-rw-r--r--  block/genhd.c         | 11
-rw-r--r--  block/scsi_ioctl.c    |  5
14 files changed, 152 insertions, 75 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8c39467..743f33a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -831,6 +831,8 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
+		ad->current_batch_expires = jiffies +
+					ad->batch_expire[ad->batch_data_dir];
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index b754a4a..1905aab 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (!new_io) {
-		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-	} else {
-		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+	part = get_part(rq->rq_disk, rq->sector);
+	if (!new_io)
+		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+	else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
@@ -253,9 +254,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
 **/
 void generic_unplug_device(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
+	if (blk_queue_plugged(q)) {
+		spin_lock_irq(q->queue_lock);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -479,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
 
 	return q;
 }
@@ -541,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
@@ -804,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-		if (!rq) {
-			struct io_context *ioc;
-
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }
@@ -1536,10 +1535,11 @@ static int __end_that_request_first(struct request *req, int error,
 	}
 
 	if (blk_fs_request(req) && req->rq_disk) {
+		struct hd_struct *part = get_part(req->rq_disk, req->sector);
 		const int rw = rq_data_dir(req);
 
-		all_stat_add(req->rq_disk, sectors[rw],
-			     nr_bytes >> 9, req->sector);
+		all_stat_add(req->rq_disk, part, sectors[rw],
+			     nr_bytes >> 9, req->sector);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1725,8 +1725,8 @@ static void end_that_request_last(struct request *req, int error)
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part = get_part(disk, req->sector);
 
-		__all_stat_inc(disk, ios[rw], req->sector);
-		__all_stat_add(disk, ticks[rw], duration, req->sector);
+		__all_stat_inc(disk, part, ios[rw], req->sector);
+		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
 		disk_round_stats(disk);
 		disk->in_flight--;
 		if (part) {
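
The generic_unplug_device() hunk above replaces an unconditional lock round-trip with a plugged-state check before taking queue_lock. A minimal userspace sketch of the same "cheap check outside the lock, act under the lock" shape, using pthreads and C11 atomics (the queue and field names are invented for illustration, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct queue {
    	pthread_mutex_t lock;
    	atomic_bool plugged;	/* cheap, lock-free hint */
    };

    static void __unplug(struct queue *q)
    {
    	/* caller must hold q->lock; kick pending work here */
    	if (atomic_exchange(&q->plugged, false))
    		;	/* ...dispatch queued requests... */
    }

    static void unplug(struct queue *q)
    {
    	/* fast path: skip the lock entirely when there is nothing to do */
    	if (!atomic_load(&q->plugged))
    		return;

    	pthread_mutex_lock(&q->lock);
    	__unplug(q);		/* recheck and act with the lock held */
    	pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
    	struct queue q = { PTHREAD_MUTEX_INITIALIZER, true };
    	unplug(&q);
    	return 0;
    }

A racing plug can slip past the unlocked check; that is tolerable for an unplug, because whoever plugs the queue afterwards owns the next unplug.
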
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e34df7c..012f065 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 73b2356..651136a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 				 struct bio *nxt)
 {
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
 	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	}
 	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio))
 	    && !BIOVEC_VIRT_OVERSIZE(len)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bb93d4c..8dd8641 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -286,8 +286,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+	if (!t->queue_lock)
+		WARN_ON_ONCE(1);
+	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(t->queue_lock, flags);
 		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+		spin_unlock_irqrestore(t->queue_lock, flags);
+	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
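
blk_queue_stack_limits() above merges a bottom device's limits into a stacked (top) queue: capacities combine with min(), granularities with max(), and zero means "unlimited". A minimal, self-contained illustration of that combining rule (struct and field names chosen for the example):

    #include <stdio.h>

    struct limits {
    	unsigned int max_sectors;	/* 0 means "no limit" */
    	unsigned int max_segment_size;
    	unsigned int hardsect_size;
    };

    /* min() that treats zero as "unlimited", like the kernel's min_not_zero() */
    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
    	if (a == 0) return b;
    	if (b == 0) return a;
    	return a < b ? a : b;
    }

    static void stack_limits(struct limits *t, const struct limits *b)
    {
    	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
    	/* capacities stack down to the smaller value ... */
    	if (b->max_segment_size < t->max_segment_size)
    		t->max_segment_size = b->max_segment_size;
    	/* ... but granularities stack up to the larger one */
    	if (b->hardsect_size > t->hardsect_size)
    		t->hardsect_size = b->hardsect_size;
    }

    int main(void)
    {
    	struct limits top = { 0, 65536, 512 };		/* e.g. a dm/md device */
    	struct limits bottom = { 1024, 32768, 4096 };	/* underlying disk */

    	stack_limits(&top, &bottom);
    	printf("%u %u %u\n", top.max_sectors,
    	       top.max_segment_size, top.hardsect_size);	/* 1024 32768 4096 */
    	return 0;
    }
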
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e85c401..304ec73 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -146,11 +146,13 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	unsigned long nm;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (nm)
-		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
-		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
diff --git a/block/blk-tag.c b/block/blk-tag.c
index de64e04..32667be 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	queue_flag_set(QUEUE_FLAG_QUEUED, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
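
The blk-tag.c hunks convert init and teardown paths to *_unlocked flag helpers, reserving queue_flag_set()/queue_flag_clear() for callers that hold queue_lock (as the new kernel-doc on blk_queue_init_tags() spells out for the resize case). A hedged sketch of that two-variant convention, with a plain debug field standing in for lockdep (all names invented):

    #include <assert.h>
    #include <pthread.h>

    struct queue {
    	pthread_mutex_t lock;
    	unsigned long flags;
    	int lock_held;		/* debug aid standing in for lockdep */
    };

    /* Locked variant: caller must already hold q->lock. */
    static void flag_set(struct queue *q, unsigned bit)
    {
    	assert(q->lock_held);	/* the kernel would WARN_ON_ONCE() here */
    	q->flags |= 1UL << bit;
    }

    /*
     * Unlocked variant: only safe where nobody else can see the queue yet
     * (initialization) or any longer (teardown).
     */
    static void flag_set_unlocked(struct queue *q, unsigned bit)
    {
    	q->flags |= 1UL << bit;
    }

    int main(void)
    {
    	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

    	flag_set_unlocked(&q, 3);	/* init path: no lock needed yet */

    	pthread_mutex_lock(&q.lock);
    	q.lock_held = 1;
    	flag_set(&q, 1);		/* runtime path: lock required */
    	q.lock_held = 0;
    	pthread_mutex_unlock(&q.lock);
    	return 0;
    }
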
diff --git a/block/blktrace.c b/block/blktrace.c
index 568588c..8d3a277 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -75,6 +75,24 @@ static void trace_note_time(struct blk_trace *bt)
 	local_irq_restore(flags);
 }
 
+void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
+{
+	int n;
+	va_list args;
+	unsigned long flags;
+	char *buf;
+
+	local_irq_save(flags);
+	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+	va_start(args, fmt);
+	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
+	va_end(args);
+
+	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__trace_note_message);
+
 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 			 pid_t pid)
 {
@@ -141,10 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
-	 * from coming in and stepping on our toes. Once reserved, it's
-	 * enough to get preemption disabled to prevent read of this data
-	 * before we are through filling it. get_cpu()/put_cpu() does this
-	 * for us
+	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
 
@@ -232,6 +247,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	debugfs_remove(bt->dropped_file);
 	blk_remove_tree(bt->dir);
 	free_percpu(bt->sequence);
+	free_percpu(bt->msg_data);
 	kfree(bt);
 }
 
@@ -346,6 +362,10 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;
 
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	if (!bt->msg_data)
+		goto err;
+
 	ret = -ENOENT;
 	dir = blk_create_tree(buts->name);
 	if (!dir)
@@ -392,6 +412,7 @@ err:
 		if (bt->dropped_file)
 			debugfs_remove(bt->dropped_file);
 		free_percpu(bt->sequence);
+		free_percpu(bt->msg_data);
 		if (bt->rchan)
 			relay_close(bt->rchan);
 		kfree(bt);
@@ -476,7 +497,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 
 	switch (cmd) {
 	case BLKTRACESETUP:
-		strcpy(b, bdevname(bdev, b));
+		bdevname(bdev, b);
 		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
 		break;
 	case BLKTRACESTART:
diff --git a/block/bsg.c b/block/bsg.c
index fa796b6..f0b7cd3 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -174,7 +174,11 @@ unlock:
 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+	if (hdr->request_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!rq->cmd)
+			return -ENOMEM;
+	}
 
 	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
 			   hdr->request_len))
@@ -211,8 +215,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 
 	if (hdr->guard != 'Q')
 		return -EINVAL;
-	if (hdr->request_len > BLK_MAX_CDB)
-		return -EINVAL;
 	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
 	    hdr->din_xfer_len > (q->max_sectors << 9))
 		return -EIO;
@@ -302,6 +304,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 	}
 	return rq;
 out:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 	if (next_rq) {
 		blk_rq_unmap_user(next_rq->bio);
@@ -455,6 +459,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		ret = rq->errors;
 
 	blk_rq_unmap_user(bio);
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 
 	return ret;
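
In the bsg.c hunks above, a SCSI command longer than the inline BLK_MAX_CDB array is placed in a kzalloc'd buffer, and the completion paths free rq->cmd only when it no longer points at the embedded rq->__cmd. The same inline-buffer-with-heap-fallback pattern in plain C (all names invented for the example):

    #include <stdlib.h>
    #include <string.h>

    #define INLINE_CMD_LEN 16		/* stand-in for BLK_MAX_CDB */

    struct request {
    	unsigned char *cmd;		/* points at __cmd or a heap buffer */
    	unsigned char __cmd[INLINE_CMD_LEN];
    };

    static int request_set_cmd(struct request *rq, const void *src, size_t len)
    {
    	if (len > INLINE_CMD_LEN) {
    		rq->cmd = calloc(1, len);	/* fallback: heap allocation */
    		if (!rq->cmd)
    			return -1;
    	} else {
    		rq->cmd = rq->__cmd;		/* common case: inline array */
    	}
    	memcpy(rq->cmd, src, len);
    	return 0;
    }

    static void request_put_cmd(struct request *rq)
    {
    	/* free only what we allocated; the inline array needs no free */
    	if (rq->cmd != rq->__cmd)
    		free(rq->cmd);
    	rq->cmd = NULL;
    }

    int main(void)
    {
    	struct request rq;
    	unsigned char long_cdb[32] = { 0x7f };	/* oversized command */

    	if (request_set_cmd(&rq, long_cdb, sizeof(long_cdb)) == 0)
    		request_put_cmd(&rq);
    	return 0;
    }
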
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f4e1006..d01b411 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -124,6 +124,8 @@ struct cfq_data {
 struct cfq_queue {
 	/* reference count */
 	atomic_t ref;
+	/* various state flags, see below */
+	unsigned int flags;
 	/* parent cfq_data */
 	struct cfq_data *cfqd;
 	/* service_tree member */
@@ -138,14 +140,14 @@ struct cfq_queue {
 	int queued[2];
 	/* currently allocated requests */
 	int allocated[2];
-	/* pending metadata requests */
-	int meta_pending;
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
 
 	unsigned long slice_end;
 	long slice_resid;
 
+	/* pending metadata requests */
+	int meta_pending;
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 
@@ -153,8 +155,6 @@ struct cfq_queue {
 	unsigned short ioprio, org_ioprio;
 	unsigned short ioprio_class, org_ioprio_class;
 
-	/* various state flags, see below */
-	unsigned int flags;
 };
 
 enum cfqq_state_flags {
@@ -1143,18 +1143,28 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Call func for each cic attached to this ioc.
+ * Must always be called with the rcu_read_lock() held
  */
 static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
-	rcu_read_lock();
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
+}
+
+/*
+ * Call func for each cic attached to this ioc.
+ */
+static void
+call_for_each_cic(struct io_context *ioc,
+		  void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	rcu_read_lock();
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
@@ -1190,6 +1200,11 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 	cfq_cic_free(cic);
 }
 
+/*
+ * Must be called with rcu_read_lock() held or preemption otherwise disabled.
+ * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
+ * and ->trim() which is called with the task lock held
+ */
 static void cfq_free_io_context(struct io_context *ioc)
 {
 	/*
@@ -1198,7 +1213,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1296,10 +1311,10 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
 	case IOPRIO_CLASS_NONE:
 		/*
-		 * no prio set, place us in the middle of the BE classes
+		 * no prio set, inherit CPU scheduling settings
 		 */
 		cfqq->ioprio = task_nice_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
 		cfqq->ioprio = task_ioprio(ioc);
@@ -1495,20 +1510,24 @@ static struct cfq_io_context *
 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
+	unsigned long flags;
 	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
 
+	rcu_read_lock();
+
 	/*
 	 * we maintain a last-hit cache, to avoid browsing over the tree
 	 */
 	cic = rcu_dereference(ioc->ioc_data);
-	if (cic && cic->key == cfqd)
+	if (cic && cic->key == cfqd) {
+		rcu_read_unlock();
 		return cic;
+	}
 
 	do {
-		rcu_read_lock();
 		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
 		rcu_read_unlock();
 		if (!cic)
@@ -1517,10 +1536,13 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 		k = cic->key;
 		if (unlikely(!k)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
+			rcu_read_lock();
 			continue;
 		}
 
+		spin_lock_irqsave(&ioc->lock, flags);
 		rcu_assign_pointer(ioc->ioc_data, cic);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		break;
 	} while (1);
 
@@ -2127,6 +2149,10 @@ static void *cfq_init_queue(struct request_queue *q)
 
 static void cfq_slab_kill(void)
 {
+	/*
+	 * Caller already ensured that pending RCU callbacks are completed,
+	 * so we should have no busy allocations at this point.
+	 */
 	if (cfq_pool)
 		kmem_cache_destroy(cfq_pool);
 	if (cfq_ioc_pool)
@@ -2285,6 +2311,11 @@ static void __exit cfq_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
+
+	/*
+	 * this also protects us from entering cfq_slab_kill() with
+	 * pending RCU callbacks
+	 */
 	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	cfq_slab_kill();
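
The call_for_each_cic() split follows the usual double-underscore convention: __call_for_each_cic() assumes the caller already holds rcu_read_lock() (true for ->dtor(), which put_io_context() now keeps entirely inside its read-side section per the blk-ioc.c hunk), while the plain wrapper takes the lock itself. A loose userspace analogue, with a reader-writer lock standing in for RCU (illustrative only; RCU's read side is much cheaper and its semantics weaker than a rwlock's):

    #include <pthread.h>
    #include <stddef.h>

    struct list_node { struct list_node *next; };

    struct ctx {
    	pthread_rwlock_t rcu;	/* crude stand-in for the RCU read side */
    	struct list_node *head;
    };

    /* Must be called with ctx->rcu read-held. */
    static void __for_each_node(struct ctx *c, void (*fn)(struct list_node *))
    {
    	for (struct list_node *n = c->head; n; n = n->next)
    		fn(n);
    }

    /* Convenience wrapper for callers that do not hold the read lock. */
    static void for_each_node(struct ctx *c, void (*fn)(struct list_node *))
    {
    	pthread_rwlock_rdlock(&c->rcu);
    	__for_each_node(c, fn);
    	pthread_rwlock_unlock(&c->rcu);
    }

    static void visit(struct list_node *n) { (void)n; }

    int main(void)
    {
    	struct ctx c = { PTHREAD_RWLOCK_INITIALIZER, NULL };
    	for_each_node(&c, visit);	/* wrapper takes the read lock */
    	return 0;
    }

Splitting the iteration out this way lets a caller that already sits inside a read-side critical section avoid a redundant (and, in the RCU case, potentially misleading) nested lock.
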
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index c70d0b6..c23177e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -555,7 +555,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 		return -EFAULT;
 
-	strcpy(b, bdevname(bdev, b));
+	bdevname(bdev, b);
 
 	buts = (struct blk_user_trace_setup) {
 		.act_mask = cbuts.act_mask,
diff --git a/block/elevator.c b/block/elevator.c
index 980f8ae..902dd13 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1110,6 +1110,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 	spin_unlock_irq(q->queue_lock);
 
+	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
+
 	return 1;
 
 fail_register:
diff --git a/block/genhd.c b/block/genhd.c
index fda9c7a..b922d48 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -653,15 +653,21 @@ void genhd_media_change_notify(struct gendisk *disk)
 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
 #endif  /*  0  */
 
-dev_t blk_lookup_devt(const char *name)
+dev_t blk_lookup_devt(const char *name, int part)
 {
 	struct device *dev;
 	dev_t devt = MKDEV(0, 0);
 
 	mutex_lock(&block_class_lock);
 	list_for_each_entry(dev, &block_class.devices, node) {
+		if (dev->type != &disk_type)
+			continue;
 		if (strcmp(dev->bus_id, name) == 0) {
-			devt = dev->devt;
+			struct gendisk *disk = dev_to_disk(dev);
+
+			if (part < disk->minors)
+				devt = MKDEV(MAJOR(dev->devt),
+					     MINOR(dev->devt) + part);
 			break;
 		}
 	}
@@ -669,7 +675,6 @@ dev_t blk_lookup_devt(const char *name)
 
 	return devt;
 }
-
 EXPORT_SYMBOL(blk_lookup_devt);
 
 struct gendisk *alloc_disk(int minors)
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ffa3720..78199c0 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -33,13 +33,12 @@
 #include <scsi/scsi_cmnd.h>
 
 /* Command group 3 is reserved and should never be used.  */
-const unsigned char scsi_command_size[8] =
+const unsigned char scsi_command_size_tbl[8] =
 {
 	6, 10, 10, 12,
 	16, 12, 10, 10
 };
-
-EXPORT_SYMBOL(scsi_command_size);
+EXPORT_SYMBOL(scsi_command_size_tbl);
 
 #include <scsi/sg.h>
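
blk_lookup_devt() above now resolves a partition by offsetting the disk's minor number, after bounds-checking part against disk->minors. The same arithmetic with glibc's makedev()/major()/minor() (a sketch assuming a disk's partitions occupy consecutive minors, as the pre-extended-devt genhd layout guarantees):

    #include <stdio.h>
    #include <sys/sysmacros.h>
    #include <sys/types.h>

    /* e.g. sda is 8:0 with 16 minors per disk, so sda2 is 8:2 */
    static dev_t partition_devt(dev_t disk, int minors, int part)
    {
    	if (part >= minors)
    		return makedev(0, 0);	/* out of range: "not found" */
    	return makedev(major(disk), minor(disk) + part);
    }

    int main(void)
    {
    	dev_t sda = makedev(8, 0);
    	dev_t sda2 = partition_devt(sda, 16, 2);

    	printf("%u:%u\n", major(sda2), minor(sda2));	/* prints 8:2 */
    	return 0;
    }
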