From 9cb83c7529d929c00f37d821daed1942a1b20602 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 16 Oct 2007 11:24:32 +0200 Subject: [SCSI] add use_sg_chaining option to scsi_host_template This option is true if a low-level driver can support sg chaining. This will be removed eventually when all the drivers are converted to support sg chaining. q->max_phys_segments is set to SCSI_MAX_SG_SEGMENTS if false. Signed-off-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- arch/ia64/hp/sim/simscsi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index d62fa76..a3a558a 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = { .max_sectors = 1024, .cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN, .use_clustering = DISABLE_CLUSTERING, + .use_sg_chaining = ENABLE_SG_CHAINING, }; static int __init -- cgit v1.1 From 8b87d9f48a2f0e44fac57aca344d407df39a9901 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 24 Jul 2007 12:38:15 +0200 Subject: x86-64: update calgary iommu to sg helpers Acked-by: Muli Ben-Yehuda Signed-off-by: Jens Axboe --- arch/x86/kernel/pci-calgary_64.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 71da01e..a50b787 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, int direction) { struct iommu_table *tbl = find_iommu_table(dev); + struct scatterlist *s; + int i; if (!translate_phb(to_pci_dev(dev))) return; - while (nelems--) { + for_each_sg(sglist, s, nelems, i) { unsigned int npages; - dma_addr_t dma = sglist->dma_address; - unsigned int dmalen = sglist->dma_length; + dma_addr_t dma = s->dma_address; + unsigned int dmalen = s->dma_length; if (dmalen == 0) break; npages = num_dma_pages(dma, dmalen); iommu_free(tbl, dma, npages); - sglist++; } } static int calgary_nontranslate_map_sg(struct device* dev, struct scatterlist *sg, int nelems, int direction) { + struct scatterlist *s; int i; - for (i = 0; i < nelems; i++ ) { - struct scatterlist *s = &sg[i]; + for_each_sg(sg, s, nelems, i) { BUG_ON(!s->page); s->dma_address = virt_to_bus(page_address(s->page) +s->offset); s->dma_length = s->length; @@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, int nelems, int direction) { struct iommu_table *tbl = find_iommu_table(dev); + struct scatterlist *s; unsigned long vaddr; unsigned int npages; unsigned long entry; @@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, if (!translate_phb(to_pci_dev(dev))) return calgary_nontranslate_map_sg(dev, sg, nelems, direction); - for (i = 0; i < nelems; i++ ) { - struct scatterlist *s = &sg[i]; + for_each_sg(sg, s, nelems, i) { BUG_ON(!s->page); vaddr = (unsigned long)page_address(s->page) + s->offset; @@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, return nelems; error: calgary_unmap_sg(dev, sg, nelems, direction); - for (i = 0; i < nelems; i++) { - sg[i].dma_address = bad_dma_address; - sg[i].dma_length = 0; + for_each_sg(sg, s, nelems, i) { + sg->dma_address = bad_dma_address; + sg->dma_length = 0; } return 0; } -- 
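The simscsi and Calgary changes above show the pattern the rest of this series repeats: once a scatterlist may be chained across separately allocated tables, a driver can no longer walk it with sg++ or &sg[i], because consecutive entries are no longer guaranteed to be adjacent in memory. Below is a minimal userspace model of that idea, for illustration only: the field names, the chain-bit encoding, and sg_next_model() are assumptions of this sketch, not the kernel's actual struct scatterlist or sg_next().

/*
 * Minimal userspace model of a chained scatter-gather table.
 * Illustration only: field names and the chain encoding are
 * assumptions of this sketch, not the kernel ABI.
 */
#include <stdio.h>
#include <stdint.h>

struct sg_entry {
	uintptr_t page_link;	/* page pointer, or next table with low bit set */
	unsigned int offset;
	unsigned int length;
};

#define SG_CHAIN 0x1UL

/* Follow the chain marker instead of relying on array adjacency. */
static struct sg_entry *sg_next_model(struct sg_entry *sg)
{
	sg++;
	if (sg->page_link & SG_CHAIN)
		sg = (struct sg_entry *)(sg->page_link & ~SG_CHAIN);
	return sg;
}

int main(void)
{
	/* Second table, allocated separately from the first. */
	struct sg_entry second[2] = {
		{ 0, 0, 512 },
		{ 0, 0, 1024 },
	};
	/* First table: two data entries plus a chain entry to "second". */
	struct sg_entry first[3] = {
		{ 0, 0, 4096 },
		{ 0, 0, 256 },
		{ (uintptr_t)second | SG_CHAIN, 0, 0 },
	};
	struct sg_entry *sg = first;
	int i, nents = 4;		/* data entries across both tables */
	unsigned int total = 0;

	for (i = 0; i < nents; i++) {
		total += sg->length;
		/* "sg++" here would land on the chain entry and then run
		 * off the end of "first"; sg_next_model() hops tables. */
		if (i + 1 < nents)
			sg = sg_next_model(sg);
	}

	printf("walked %d entries, %u bytes total\n", nents, total);
	return 0;
}

With chaining, the old "while (nelems--) { ...; sglist++; }" style loop in calgary_unmap_sg() would step into the chain entry itself; following the chain marker, as for_each_sg() does, is exactly what the conversion above provides.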
cgit v1.1 From b922f53bd02f2017a3a47f29abaee18f91fb8fb4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 24 Jul 2007 12:39:27 +0200 Subject: x86-64: update nommu to sg helpers Acked-by: Muli Ben-Yehuda Signed-off-by: Jens Axboe --- arch/x86/kernel/pci-nommu_64.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c index 2a34c6c..e85d436 100644 --- a/arch/x86/kernel/pci-nommu_64.c +++ b/arch/x86/kernel/pci-nommu_64.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size, static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + struct scatterlist *s; int i; - for (i = 0; i < nents; i++ ) { - struct scatterlist *s = &sg[i]; + for_each_sg(sg, s, nents, i) { BUG_ON(!s->page); s->dma_address = virt_to_bus(page_address(s->page) +s->offset); if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) -- cgit v1.1 From 9ee1bea4694f99d7f9cc85a93599411ad9f80f47 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 4 Oct 2007 09:35:37 +0200 Subject: x86-64: update pci-gart iommu to sg helpers Signed-off-by: Jens Axboe --- arch/x86/kernel/pci-gart_64.c | 65 ++++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 4918c57..cfcc84e 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, */ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) { + struct scatterlist *s; int i; - for (i = 0; i < nents; i++) { - struct scatterlist *s = &sg[i]; + for_each_sg(sg, s, nents, i) { if (!s->dma_length || !s->length) break; gart_unmap_single(dev, s->dma_address, s->dma_length, dir); @@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, int nents, int dir) { + struct scatterlist *s; int i; #ifdef CONFIG_IOMMU_DEBUG printk(KERN_DEBUG "dma_map_sg overflow\n"); #endif - for (i = 0; i < nents; i++ ) { - struct scatterlist *s = &sg[i]; + for_each_sg(sg, s, nents, i) { unsigned long addr = page_to_phys(s->page) + s->offset; if (nonforced_iommu(dev, addr, s->length)) { addr = dma_map_area(dev, addr, s->length, dir); @@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, } /* Map multiple scatterlist entries continuous into the first. 
*/ -static int __dma_map_cont(struct scatterlist *sg, int start, int stopat, +static int __dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout, unsigned long pages) { unsigned long iommu_start = alloc_iommu(pages); unsigned long iommu_page = iommu_start; + struct scatterlist *s; int i; if (iommu_start == -1) return -1; - - for (i = start; i < stopat; i++) { - struct scatterlist *s = &sg[i]; + + for_each_sg(start, s, nelems, i) { unsigned long pages, addr; unsigned long phys_addr = s->dma_address; - BUG_ON(i > start && s->offset); - if (i == start) { + BUG_ON(s != start && s->offset); + if (s == start) { *sout = *s; sout->dma_address = iommu_bus_base; sout->dma_address += iommu_page*PAGE_SIZE + s->offset; @@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat, return 0; } -static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat, +static inline int dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout, unsigned long pages, int need) { - if (!need) { - BUG_ON(stopat - start != 1); - *sout = sg[start]; - sout->dma_length = sg[start].length; + if (!need) { + BUG_ON(nelems != 1); + *sout = *start; + sout->dma_length = start->length; return 0; - } - return __dma_map_cont(sg, start, stopat, sout, pages); + } + return __dma_map_cont(start, nelems, sout, pages); } /* @@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) int start; unsigned long pages = 0; int need = 0, nextneed; + struct scatterlist *s, *ps, *start_sg, *sgmap; if (nents == 0) return 0; @@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) out = 0; start = 0; - for (i = 0; i < nents; i++) { - struct scatterlist *s = &sg[i]; + start_sg = sgmap = sg; + ps = NULL; /* shut up gcc */ + for_each_sg(sg, s, nents, i) { dma_addr_t addr = page_to_phys(s->page) + s->offset; s->dma_address = addr; BUG_ON(s->length == 0); @@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) /* Handle the previous not yet processed entries */ if (i > start) { - struct scatterlist *ps = &sg[i-1]; /* Can only merge when the last chunk ends on a page boundary and the new one doesn't have an offset. 
*/ if (!iommu_merge || !nextneed || !need || s->offset || - (ps->offset + ps->length) % PAGE_SIZE) { - if (dma_map_cont(sg, start, i, sg+out, pages, - need) < 0) + (ps->offset + ps->length) % PAGE_SIZE) { + if (dma_map_cont(start_sg, i - start, sgmap, + pages, need) < 0) goto error; out++; + sgmap = sg_next(sgmap); pages = 0; - start = i; + start = i; + start_sg = s; } } need = nextneed; pages += to_pages(s->offset, s->length); + ps = s; } - if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) + if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0) goto error; out++; flush_gart(); - if (out < nents) - sg[out].dma_length = 0; + if (out < nents) { + sgmap = sg_next(sgmap); + sgmap->dma_length = 0; + } return out; error: @@ -437,8 +444,8 @@ error: if (panic_on_overflow) panic("dma_map_sg: overflow on %lu pages\n", pages); iommu_full(dev, pages << PAGE_SHIFT, dir); - for (i = 0; i < nents; i++) - sg[i].dma_address = bad_dma_address; + for_each_sg(sg, s, nents, i) + s->dma_address = bad_dma_address; return 0; } -- cgit v1.1 From 9b6eccfccbfb2cde5405021beaad2ebb8081a2e9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Oct 2007 11:27:26 +0200 Subject: IA64: sg chaining support This updates the ia64 iommu/pci dma mappers to sg chaining. Signed-off-by: Jens Axboe --- arch/ia64/hp/common/sba_iommu.c | 14 +++++++------- arch/ia64/sn/pci/pci_dma.c | 11 ++++++----- 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index e980e7a..4338f41 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, startsg->dma_address, startsg->dma_length, sba_sg_address(startsg)); - startsg++; + startsg = sg_next(startsg); } } @@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) while (the_nents-- > 0) { if (sba_sg_address(the_sg) == 0x0UL) sba_dump_sg(NULL, startsg, nents); - the_sg++; + the_sg = sg_next(the_sg); } } @@ -1201,7 +1201,7 @@ sba_fill_pdir( u32 pide = startsg->dma_address & ~PIDE_FLAG; dma_offset = (unsigned long) pide & ~iovp_mask; startsg->dma_address = 0; - dma_sg++; + dma_sg = sg_next(dma_sg); dma_sg->dma_address = pide | ioc->ibase; pdirp = &(ioc->pdir_base[pide >> iovp_shift]); n_mappings++; @@ -1228,7 +1228,7 @@ sba_fill_pdir( pdirp++; } while (cnt > 0); } - startsg++; + startsg = sg_next(startsg); } /* force pdir update */ wmb(); @@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc, while (--nents > 0) { unsigned long vaddr; /* tmp */ - startsg++; + startsg = sg_next(startsg); /* PARANOID */ startsg->dma_address = startsg->dma_length = 0; @@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di #ifdef ALLOW_IOV_BYPASS_SG ASSERT(to_pci_dev(dev)->dma_mask); if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { - for (sg = sglist ; filled < nents ; filled++, sg++){ + for_each_sg(sglist, sg, nents, filled) { sg->dma_length = sg->length; sg->dma_address = virt_to_phys(sba_sg_address(sg)); } @@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in while (nents && sglist->dma_length) { sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); - sglist++; + sglist = sg_next(sglist); nents--; } diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c 
index d79ddac..ecd8a52 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single); * * Unmap a set of streaming mode DMA translations. */ -void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, +void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, int direction) { int i; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); + struct scatterlist *sg; BUG_ON(dev->bus != &pci_bus_type); - for (i = 0; i < nhwentries; i++, sg++) { + for_each_sg(sgl, sg, nhwentries, i) { provider->dma_unmap(pdev, sg->dma_address, direction); sg->dma_address = (dma_addr_t) NULL; sg->dma_length = 0; @@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg); * * Maps each entry of @sg for DMA. */ -int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, +int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, int direction) { unsigned long phys_addr; - struct scatterlist *saved_sg = sg; + struct scatterlist *saved_sg = sgl, *sg; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int i; @@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, /* * Setup a DMA address for each entry in the scatterlist. */ - for (i = 0; i < nhwentries; i++, sg++) { + for_each_sg(sgl, sg, nhwentries, i) { phys_addr = SG_ENT_PHYS_ADDRESS(sg); sg->dma_address = provider->dma_map(pdev, phys_addr, sg->length, -- cgit v1.1 From d1ed455e30e439e0d1483c2e236d7e15e1010704 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Jul 2007 08:22:17 +0200 Subject: PS3: sg chaining support Acked-by: Geoff Levand Signed-off-by: Jens Axboe --- arch/powerpc/platforms/ps3/system-bus.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 190ff4b..07e64b4 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr, } } -static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) +static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, + int nents, enum dma_data_direction direction) { #if defined(CONFIG_PS3_DYNAMIC_DMA) BUG_ON("do"); return -EPERM; #else struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); + struct scatterlist *sg; int i; - for (i = 0; i < nents; i++, sg++) { + for_each_sg(sgl, sg, nents, i) { int result = ps3_dma_map(dev->d_region, page_to_phys(sg->page) + sg->offset, sg->length, &sg->dma_address, 0); -- cgit v1.1 From 78bdc3106a877cfa50439fa66b52acbc4e7868df Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 12 Oct 2007 13:44:12 +0200 Subject: PPC: sg chaining support This updates the ppc iommu/pci dma mappers to sg chaining. Includes further fixes from FUJITA Tomonori . 
Signed-off-by: Jens Axboe --- arch/powerpc/kernel/dma_64.c | 5 +++-- arch/powerpc/kernel/ibmebus.c | 11 ++++++----- arch/powerpc/kernel/iommu.c | 23 ++++++++++++++--------- 3 files changed, 23 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c index 7b0e754..9001104 100644 --- a/arch/powerpc/kernel/dma_64.c +++ b/arch/powerpc/kernel/dma_64.c @@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, { } -static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg, +static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { + struct scatterlist *sg; int i; - for (i = 0; i < nents; i++, sg++) { + for_each_sg(sgl, sg, nents, i) { sg->dma_address = (page_to_phys(sg->page) + sg->offset) | dma_direct_offset; sg->dma_length = sg->length; diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 53bf646..2e16ca5 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev, } static int ibmebus_map_sg(struct device *dev, - struct scatterlist *sg, + struct scatterlist *sgl, int nents, enum dma_data_direction direction) { + struct scatterlist *sg; int i; - for (i = 0; i < nents; i++) { - sg[i].dma_address = (dma_addr_t)page_address(sg[i].page) - + sg[i].offset; - sg[i].dma_length = sg[i].length; + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = (dma_addr_t)page_address(sg->page) + + sg->offset; + sg->dma_length = sg->length; } return nents; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index e4ec6eee..306a6f7 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, dma_addr_t dma_next = 0, dma_addr; unsigned long flags; struct scatterlist *s, *outs, *segstart; - int outcount, incount; + int outcount, incount, i; unsigned long handle; BUG_ON(direction == DMA_NONE); @@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, spin_lock_irqsave(&(tbl->it_lock), flags); - for (s = outs; nelems; nelems--, s++) { + for_each_sg(sglist, s, nelems, i) { unsigned long vaddr, npages, entry, slen; slen = s->length; @@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, if (novmerge || (dma_addr != dma_next)) { /* Can't merge: create a new segment */ segstart = s; - outcount++; outs++; + outcount++; + outs = sg_next(outs); DBG(" can't merge, new segment.\n"); } else { outs->dma_length += s->length; @@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, * next entry of the sglist if we didn't fill the list completely */ if (outcount < incount) { - outs++; + outs = sg_next(outs); outs->dma_address = DMA_ERROR_CODE; outs->dma_length = 0; } @@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, return outcount; failure: - for (s = &sglist[0]; s <= outs; s++) { + for_each_sg(sglist, s, nelems, i) { if (s->dma_length != 0) { unsigned long vaddr, npages; @@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, s->dma_address = DMA_ERROR_CODE; s->dma_length = 0; } + if (s == outs) + break; } spin_unlock_irqrestore(&(tbl->it_lock), flags); return 0; @@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct 
scatterlist *sglist, void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { + struct scatterlist *sg; unsigned long flags; BUG_ON(direction == DMA_NONE); @@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, spin_lock_irqsave(&(tbl->it_lock), flags); + sg = sglist; while (nelems--) { unsigned int npages; - dma_addr_t dma_handle = sglist->dma_address; + dma_addr_t dma_handle = sg->dma_address; - if (sglist->dma_length == 0) + if (sg->dma_length == 0) break; - npages = iommu_num_pages(dma_handle,sglist->dma_length); + npages = iommu_num_pages(dma_handle, sg->dma_length); __iommu_free(tbl, dma_handle, npages); - sglist++; + sg = sg_next(sg); } /* Flush/invalidate TLBs if necessary. As for iommu_free(), we -- cgit v1.1 From 0912a5db0ea45d8aef3ee99a882e093285e32c3c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 14 May 2007 15:44:38 +0200 Subject: SPARC: sg chaining support This updates the sparc iommu/pci dma mappers to sg chaining. Acked-by: David S. Miller Signed-off-by: Jens Axboe --- arch/sparc/kernel/ioport.c | 25 +++++++++++++------------ arch/sparc/mm/io-unit.c | 12 +++++++----- arch/sparc/mm/iommu.c | 10 +++++----- arch/sparc/mm/sun4c.c | 10 ++++++---- 4 files changed, 31 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 62182d2..9c3ed88 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -35,6 +35,7 @@ #include #include /* struct pci_dev */ #include +#include #include #include @@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev, * Device ownership issues as mentioned above for pci_map_single are * the same here. */ -int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, +int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { + struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ - for (n = 0; n < nents; n++) { + for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg->page) == NULL); sg->dvma_address = virt_to_phys(page_address(sg->page)) + sg->offset; sg->dvma_length = sg->length; - sg++; } return nents; } @@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, * Again, cpu read rules concerning calls here are the same as for * pci_unmap_single() above. */ -void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, +void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { + struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { - for (n = 0; n < nents; n++) { + for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); - sg++; } } } @@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t * The same as pci_dma_sync_single_* but for a scatter-gather list, * same rules and usage. 
*/ -void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) +void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { + struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { - for (n = 0; n < nents; n++) { + for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); - sg++; } } } -void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) +void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { + struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { - for (n = 0; n < nents; n++) { + for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); - sg++; } } } diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 7c89893..375b4db 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -11,8 +11,8 @@ #include #include /* pte_offset_map => kmap_atomic */ #include +#include -#include #include #include #include @@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus spin_lock_irqsave(&iounit->lock, flags); while (sz != 0) { --sz; - sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); - sg[sz].dvma_length = sg[sz].length; + sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length); + sg->dvma_length = sg->length; + sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } @@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_ spin_lock_irqsave(&iounit->lock, flags); while (sz != 0) { --sz; - len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT; - vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; + len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; + vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); + sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 52e907a..283656d 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -12,8 +12,8 @@ #include #include #include /* pte_offset_map => kmap_atomic */ +#include -#include #include #include #include @@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; sg->dvma_length = (__u32) sg->length; - sg++; + sg = sg_next(sg); } } @@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; sg->dvma_length = (__u32) sg->length; - sg++; + sg = sg_next(sg); } } @@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu 
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; sg->dvma_length = (__u32) sg->length; - sg++; + sg = sg_next(sg); } } @@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus); sg->dvma_address = 0x21212121; - sg++; + sg = sg_next(sg); } } diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index 005a3e7..ee6708f 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -17,8 +17,8 @@ #include #include #include +#include -#include #include #include #include @@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus * { while (sz != 0) { --sz; - sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); - sg[sz].dvma_length = sg[sz].length; + sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length); + sg->dvma_length = sg->length; + sg = sg_next(sg); } } @@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b { while (sz != 0) { --sz; - sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length); + sun4c_unlockarea((char *)sg->dvma_address, sg->length); + sg = sg_next(sg); } } -- cgit v1.1 From 2c941a204070ab32d92d40318a3196a7fb994c00 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 7 Aug 2007 09:37:10 +0200 Subject: SPARC64: sg chaining support This updates the sparc64 iommu/pci dma mappers to sg chaining. Acked-by: David S. Miller Later updated to newer kernel with unified sparc64 iommu sg handling. Signed-off-by: Jens Axboe --- arch/sparc64/kernel/iommu.c | 39 ++++++++++++++++++++++++--------------- arch/sparc64/kernel/pci_sun4v.c | 32 +++++++++++++++++++------------- 2 files changed, 43 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c index b35a621..db3ffcf 100644 --- a/arch/sparc64/kernel/iommu.c +++ b/arch/sparc64/kernel/iommu.c @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef CONFIG_PCI #include @@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, unsigned long iopte_protection) { struct scatterlist *dma_sg = sg; - struct scatterlist *sg_end = sg + nelems; + struct scatterlist *sg_end = sg_last(sg, nelems); int i; for (i = 0; i < nused; i++) { @@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); break; } - sg++; + sg = sg_next(sg); } pteval = iopte_protection | (pteval & IOPTE_PAGE); @@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, } pteval = (pteval & IOPTE_PAGE) + len; - sg++; + sg = sg_next(sg); /* Skip over any tail mappings we've fully mapped, * adjusting pteval along the way. Stop when we * detect a page crossing event. 
*/ - while (sg < sg_end && + while (sg != sg_end && (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && (pteval == SG_ENT_PHYS_ADDRESS(sg)) && ((pteval ^ (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { pteval += sg->length; - sg++; + sg = sg_next(sg); } if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) pteval = ~0UL; } while (dma_npages != 0); - dma_sg++; + dma_sg = sg_next(dma_sg); } } @@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, sgtmp = sglist; while (used && sgtmp->dma_length) { sgtmp->dma_address += dma_base; - sgtmp++; + sgtmp = sg_next(sgtmp); used--; } used = nelems - used; @@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, struct strbuf *strbuf; iopte_t *base; unsigned long flags, ctx, i, npages; + struct scatterlist *sg, *sgprv; u32 bus_addr; if (unlikely(direction == DMA_NONE)) { @@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, bus_addr = sglist->dma_address & IO_PAGE_MASK; - for (i = 1; i < nelems; i++) - if (sglist[i].dma_length == 0) + sgprv = NULL; + for_each_sg(sglist, sg, nelems, i) { + if (sg->dma_length == 0) break; - i--; - npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - + sgprv = sg; + } + + npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - bus_addr) >> IO_PAGE_SHIFT; base = iommu->page_table + @@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, struct iommu *iommu; struct strbuf *strbuf; unsigned long flags, ctx, npages, i; + struct scatterlist *sg, *sgprv; u32 bus_addr; iommu = dev->archdata.iommu; @@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, /* Step 2: Kick data out of streaming buffers. */ bus_addr = sglist[0].dma_address & IO_PAGE_MASK; - for(i = 1; i < nelems; i++) - if (!sglist[i].dma_length) + sgprv = NULL; + for_each_sg(sglist, sg, nelems, i) { + if (sg->dma_length == 0) break; - i--; - npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) + sgprv = sg; + } + + npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - bus_addr) >> IO_PAGE_SHIFT; strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 95de144..cacacfa 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev, int nused, int nelems, unsigned long prot) { struct scatterlist *dma_sg = sg; - struct scatterlist *sg_end = sg + nelems; + struct scatterlist *sg_end = sg_last(sg, nelems); unsigned long flags; int i; @@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev, len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); break; } - sg++; + sg = sg_next(sg); } pteval = (pteval & IOPTE_PAGE); @@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev, } pteval = (pteval & IOPTE_PAGE) + len; - sg++; + sg = sg_next(sg); /* Skip over any tail mappings we've fully mapped, * adjusting pteval along the way. Stop when we * detect a page crossing event. 
*/ - while (sg < sg_end && - (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && + while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL && (pteval == SG_ENT_PHYS_ADDRESS(sg)) && ((pteval ^ (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { pteval += sg->length; - sg++; + if (sg == sg_end) + break; + sg = sg_next(sg); } if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) pteval = ~0UL; } while (dma_npages != 0); - dma_sg++; + dma_sg = sg_next(dma_sg); } if (unlikely(iommu_batch_end() < 0L)) @@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, sgtmp = sglist; while (used && sgtmp->dma_length) { sgtmp->dma_address += dma_base; - sgtmp++; + sgtmp = sg_next(sgtmp); used--; } used = nelems - used; @@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, struct pci_pbm_info *pbm; struct iommu *iommu; unsigned long flags, i, npages; + struct scatterlist *sg, *sgprv; long entry; u32 devhandle, bus_addr; @@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, devhandle = pbm->devhandle; bus_addr = sglist->dma_address & IO_PAGE_MASK; - - for (i = 1; i < nelems; i++) - if (sglist[i].dma_length == 0) + sgprv = NULL; + for_each_sg(sglist, sg, nelems, i) { + if (sg->dma_length == 0) break; - i--; - npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - + + sgprv = sg; + } + + npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - bus_addr) >> IO_PAGE_SHIFT; entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); -- cgit v1.1
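Taken together, the per-architecture conversions in this series are mechanical: &sg[i], sg++, and sg + out walks become for_each_sg()/sg_next(), and index ranges such as the GART driver's __dma_map_cont(sg, start, stopat) become a (start entry, count) pair, because with chaining an index alone can no longer locate an entry. Below is a rough, self-contained sketch of what a for_each_sg-style walker looks like; as before this is a userspace model, and the macro, helper, and field names are illustrative assumptions rather than the kernel's definitions.

/*
 * Sketch of a for_each_sg-style walker over a chained table.
 * Illustration only; not the kernel's struct scatterlist or macros.
 */
#include <stdio.h>
#include <stdint.h>

struct sg_entry {
	uintptr_t page_link;		/* low bit set => chain to next table */
	unsigned int length;
	unsigned long long dma_address;
	unsigned int dma_length;
};

#define SG_CHAIN 0x1UL

static struct sg_entry *sg_next_model(struct sg_entry *sg)
{
	sg++;
	if (sg->page_link & SG_CHAIN)
		sg = (struct sg_entry *)(sg->page_link & ~SG_CHAIN);
	return sg;
}

/* Chain-aware loop in the spirit of for_each_sg(sglist, sg, nents, i). */
#define for_each_sg_model(sglist, sg, nents, i)				\
	for ((i) = 0, (sg) = (sglist); (i) < (nents);			\
	     (i)++, (sg) = ((i) < (nents) ? sg_next_model(sg) : (sg)))

int main(void)
{
	struct sg_entry tail[1] = { { 0, 2048, 0, 0 } };
	struct sg_entry head[3] = {
		{ 0, 4096, 0, 0 },
		{ 0, 512,  0, 0 },
		{ (uintptr_t)tail | SG_CHAIN, 0, 0, 0 },	/* chain entry */
	};
	struct sg_entry *s;
	int i, nents = 3;	/* two data entries in head, one in tail */

	/* Old style was "s = &head[i]" or "s++"; the chain-aware walk: */
	for_each_sg_model(head, s, nents, i) {
		s->dma_address = 0x80000000ull + 0x1000ull * (unsigned)i;
		s->dma_length = s->length;
		printf("entry %d: len %u -> dma 0x%llx\n",
		       i, s->length, s->dma_address);
	}
	return 0;
}

The (i) < (nents) guard in the advance step only exists to keep this model from reading past the final entry; how the real scatterlist code terminates a walk is outside the scope of the sketch.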