author | Jack Morgenstein <jackm@dev.mellanox.co.il> | 2008-01-28 10:40:51 +0200 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-02-06 21:17:45 -0800 |
commit | 313abe55a87bc10e55d00f337d609e17ad5f8c9a (patch) | |
tree | 833ff3c4b33f83d4ca64ed322c2d8efa21529d71 /drivers/net/mlx4 | |
parent | 1c69fc2a9012e160c8d459f63df74a6b01db8322 (diff) | |
mlx4_core: For 64-bit systems, vmap() kernel queue buffers
Since kernel virtual memory is not a problem on 64-bit systems, there
is no reason to use our own 2-layer page mapping scheme for large
kernel queue buffers on such systems. Instead, map the page list to a
single virtually contiguous buffer with vmap(), so that we can access
buffer memory via direct indexing.
Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
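To make "direct indexing" concrete, here is a minimal sketch of what an offset lookup into such a queue buffer can look like once the page list has been vmap()ed on 64-bit systems. This helper is not part of the patch: its name is made up, and the struct mlx4_buf layout is not shown in this hunk, so the field names (u.direct.buf, u.page_list, nbufs) are assumptions taken from the diff below, for illustration only.

```c
/*
 * Illustrative only -- not part of this patch.  Resolves a byte offset
 * inside a queue buffer allocated by mlx4_buf_alloc(); the helper name
 * and the struct mlx4_buf layout are assumed from the fields used in
 * the diff below.
 */
#include <linux/mm.h>	/* PAGE_SIZE, PAGE_SHIFT */

static void *queue_buf_entry(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		/*
		 * 64-bit system (or single-page buffer): the whole buffer
		 * is virtually contiguous, so plain pointer arithmetic works.
		 */
		return buf->u.direct.buf + offset;

	/* 32-bit multi-page case: two-level lookup through the page list. */
	return buf->u.page_list[offset >> PAGE_SHIFT].buf +
	       (offset & (PAGE_SIZE - 1));
}
```

On 32-bit systems the two-level lookup has to stay, since kernel virtual address space there is too scarce to keep large queue buffers vmap()ed.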
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r-- | drivers/net/mlx4/alloc.c | 16 |
1 file changed, 16 insertions, 0 deletions
```diff
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b226e01..2da2c2e 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 
 			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
 		}
+
+		if (BITS_PER_LONG == 64) {
+			struct page **pages;
+			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+			if (!pages)
+				goto err_free;
+			for (i = 0; i < buf->nbufs; ++i)
+				pages[i] = virt_to_page(buf->u.page_list[i].buf);
+			buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+			kfree(pages);
+			if (!buf->u.direct.buf)
+				goto err_free;
+		}
 	}
 
 	return 0;
@@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
 				  buf->u.direct.map);
 	else {
+		if (BITS_PER_LONG == 64)
+			vunmap(buf->u.direct.buf);
+
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->u.page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
```