author    | David S. Miller <davem@davemloft.net> | 2008-03-18 00:37:55 -0700
committer | David S. Miller <davem@davemloft.net> | 2008-03-18 00:37:55 -0700
commit    | 577f99c1d08cf9cbdafd4e858dd13ff04d855090 (patch)
tree      | 0f726bbda9b18d311d4c95198bbd96cb7ac01db0 /block/blk-settings.c
parent    | 26c0f03f6b77c513cb7bc37b73a06819bdbb791b (diff)
parent    | 2f633928cbba8a5858bb39b11e7219a41b0fbef5 (diff)
download  | op-kernel-dev-577f99c1d08cf9cbdafd4e858dd13ff04d855090.zip, op-kernel-dev-577f99c1d08cf9cbdafd4e858dd13ff04d855090.tar.gz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/wireless/rt2x00/rt2x00dev.c
net/8021q/vlan_dev.c
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9a8ffdd..1344a0e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.  Actually
 	   some IOMMUs can handle everything, but I don't know of a
 	   way to test this here. */
-	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
  *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
  * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:	physically contiguous buffer
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-extern int blk_queue_dma_drain(struct request_queue *q,
+int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
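For readers of this merge, the second hunk above adds blk_queue_dma_pad() alongside the existing blk_queue_dma_drain(). Below is a minimal sketch of how a low-level driver's queue-setup path might call the two helpers together; the function names my_drain_needed() and my_init_queue(), the pad mask of 3 (4-byte alignment), and the 256-byte drain buffer size are illustrative assumptions, not part of this commit.

```c
#include <linux/blkdev.h>

/* Illustrative callback: tell the block layer which requests should get
 * the drain buffer appended (the policy here is an assumption, not taken
 * from this commit). */
static int my_drain_needed(struct request *rq)
{
	return 1;	/* drain every request in this sketch */
}

/* Hypothetical queue-setup helper for hardware that rounds transfers up
 * to a 4-byte boundary. */
static int my_init_queue(struct request_queue *q, void *drain_buf)
{
	/* Pad direct IO requests out to 4 bytes; the original requested
	 * length remains available via blk_rq_raw_data_len(). */
	blk_queue_dma_pad(q, 3);

	/* Register a physically contiguous drain buffer for excess DMA,
	 * as described in the blk_queue_dma_drain() kerneldoc above. */
	return blk_queue_dma_drain(q, my_drain_needed, drain_buf, 256);
}
```

This mirrors the pairing the kerneldoc describes: the pad mask grows ->data_len to the aligned size, while the drain buffer absorbs any DMA beyond the requested length.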