author		Jens Axboe <jens.axboe@oracle.com>	2007-10-22 21:19:53 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-22 21:19:53 +0200
commit		45711f1af6eff1a6d010703b4862e0d2b9afd056 (patch)
tree		3d0048f46e3df9d217d56127462ebe680348bd5a /drivers/infiniband
parent		78c2f0b8c285c5305b3e67b0595200541e15eb43 (diff)
download	op-kernel-dev-45711f1af6eff1a6d010703b4862e0d2b9afd056.zip
		op-kernel-dev-45711f1af6eff1a6d010703b4862e0d2b9afd056.tar.gz
[SG] Update drivers to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/umem.c			11
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_dma.c		4
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_mr.c		2
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_memfree.c	24
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	8
5 files changed, 29 insertions(+), 20 deletions(-)
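The conversion is the same in every driver touched by this patch: stop dereferencing struct scatterlist's page member directly and go through the scatterlist accessors instead (sg_page(), sg_set_page(), sg_init_table(), sg_phys()). Below is a minimal sketch, not taken from the patch, that illustrates the pattern with the helper signatures as they are used in this diff (sg_set_page() here takes only the entry and the page; length and offset are still assigned by hand). The function names my_build_sg() and my_release_sg() are made up for illustration.

/*
 * Illustrative sketch of the sg-helper conversion pattern, assuming the
 * helper signatures used in this patch series.
 */
#include <linux/scatterlist.h>
#include <linux/mm.h>

static void my_build_sg(struct scatterlist *sgl, struct page **pages, int n)
{
	int i;

	sg_init_table(sgl, n);			/* new: initialise entries, mark the end */
	for (i = 0; i < n; i++) {
		sg_set_page(&sgl[i], pages[i]);	/* was: sgl[i].page = pages[i]; */
		sgl[i].offset = 0;
		sgl[i].length = PAGE_SIZE;
	}
}

static void my_release_sg(struct scatterlist *sgl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		put_page(sg_page(&sgl[i]));	/* was: put_page(sgl[i].page); */
}

Going through the accessors matters because the sg helper work makes scatterlists chainable: low bits of the page pointer carry chain/end markers, so sg_init_table() sets the end marker and sg_set_page()/sg_page() preserve and mask those flag bits rather than clobbering them.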
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29..14159ff 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 		ib_dma_unmap_sg(dev, chunk->page_list,
 				chunk->nents, DMA_BIDIRECTIONAL);
 		for (i = 0; i < chunk->nents; ++i) {
+			struct page *page = sg_page(&chunk->page_list[i]);
+
 			if (umem->writable && dirty)
-				set_page_dirty_lock(chunk->page_list[i].page);
-			put_page(chunk->page_list[i].page);
+				set_page_dirty_lock(page);
+			put_page(page);
 		}
 
 		kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		}
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+		sg_init_table(chunk->page_list, chunk->nents);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (vma_list &&
 			    !is_vm_hugetlb_page(vma_list[i + off]))
 				umem->hugetlb = 0;
-			chunk->page_list[i].page = page_list[i + off];
+			sg_set_page(&chunk->page_list[i], page_list[i + off]);
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 					DMA_BIDIRECTIONAL);
 		if (chunk->nmap <= 0) {
 			for (i = 0; i < chunk->nents; ++i)
-				put_page(chunk->page_list[i].page);
+				put_page(sg_page(&chunk->page_list[i]));
 			kfree(chunk);
 
 			ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 22709a4..e90a0ea 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
 	BUG_ON(!valid_dma_direction(direction));
 
 	for_each_sg(sgl, sg, nents, i) {
-		addr = (u64) page_address(sg->page);
+		addr = (u64) page_address(sg_page(sg));
 		/* TODO: handle highmem pages */
 		if (!addr) {
 			ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
 static u64 ipath_sg_dma_address(struct ib_device *dev,
 				struct scatterlist *sg)
 {
-	u64 addr = (u64) page_address(sg->page);
+	u64 addr = (u64) page_address(sg_page(sg));
 
 	if (addr)
 		addr += sg->offset;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e442470..db4ba92 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		for (i = 0; i < chunk->nents; i++) {
 			void *vaddr;
 
-			vaddr = page_address(chunk->page_list[i].page);
+			vaddr = page_address(sg_page(&chunk->page_list[i]));
 			if (!vaddr) {
 				ret = ERR_PTR(-EINVAL);
 				goto bail;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e6..007b381 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
 			     PCI_DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
-		__free_pages(chunk->mem[i].page,
+		__free_pages(sg_page(&chunk->mem[i]),
 			     get_order(chunk->mem[i].length));
 }
 
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
 
 	for (i = 0; i < chunk->npages; ++i) {
 		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-				  lowmem_page_address(chunk->mem[i].page),
+				  lowmem_page_address(sg_page(&chunk->mem[i])),
 				  sg_dma_address(&chunk->mem[i]));
 	}
 }
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
 
 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-	mem->page = alloc_pages(gfp_mask, order);
-	if (!mem->page)
+	struct page *page;
+
+	page = alloc_pages(gfp_mask, order);
+	if (!page)
 		return -ENOMEM;
 
+	sg_set_page(mem, page);
 	mem->length = PAGE_SIZE << order;
 	mem->offset = 0;
 	return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 		if (!chunk)
 			goto fail;
 
+		sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
 		chunk->npages = 0;
 		chunk->nsg    = 0;
 		list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
 			 * so if we found the page, dma_handle has already
 			 * been assigned to. */
 			if (chunk->mem[i].length > offset) {
-				page = chunk->mem[i].page;
+				page = sg_page(&chunk->mem[i]);
 				goto out;
 			}
 			offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
 {
+	struct page *pages[1];
 	int ret = 0;
 	u8 status;
 	int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	}
 
 	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-			     &db_tab->page[i].mem.page, NULL);
+			     pages, NULL);
 	if (ret < 0)
 		goto out;
 
+	sg_set_page(&db_tab->page[i].mem, pages[0]);
 	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
 	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
 
 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 	if (ret < 0) {
-		put_page(db_tab->page[i].mem.page);
+		put_page(pages[0]);
 		goto out;
 	}
 
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		ret = -EINVAL;
 	if (ret) {
 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-		put_page(db_tab->page[i].mem.page);
+		put_page(sg_page(&db_tab->page[i].mem));
 		goto out;
 	}
 
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-			put_page(db_tab->page[i].mem.page);
+			put_page(sg_page(&db_tab->page[i].mem));
 		}
 	}
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f3529b6..d687980 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 		p = mem;
 		for_each_sg(sgl, sg, data->size, i) {
-			from = kmap_atomic(sg->page, KM_USER0);
+			from = kmap_atomic(sg_page(sg), KM_USER0);
 			memcpy(p,
 			       from + sg->offset,
 			       sg->length);
@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
-			to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
 			memcpy(to + sg->offset,
 			       p,
 			       sg->length);
@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg->page),
+		   (unsigned long)sg_phys(sg),
 		   (unsigned long)sg->offset,
 		   (unsigned long)sg->length); */
 		end_addr = ib_sg_dma_address(ibdev, sg) +
@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
 			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-			 sg->page, sg->offset,
+			 sg_page(sg), sg->offset,
 			 sg->length, ib_sg_dma_len(ibdev, sg));
 }