From 34ddf733c415a8e336c996a4303d9f336e0c81b5 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Sun, 2 Apr 2006 16:01:58 -0500
Subject: powerpc/ppc: export strncasecmp

We have a strncasecmp so we might as well export it

Signed-off-by: Kumar Gala
---
 arch/powerpc/kernel/ppc_ksyms.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index dfa5398..4b052ae 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,6 +81,7 @@
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strcasecmp);
+EXPORT_SYMBOL(strncasecmp);
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
--
cgit v1.1
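For context: EXPORT_SYMBOL() is what makes a kernel symbol visible to loadable modules, so without this one-liner a module calling strncasecmp() would fail to resolve the symbol at load time even though the function is already built into the powerpc kernel. Below is a minimal, hypothetical module sketch that relies on the export; the module name and the strings being compared are made up for illustration.

/* Hypothetical module that only loads once strncasecmp is exported. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>

static int __init casecmp_demo_init(void)
{
        const char *a = "PowerPC";
        const char *b = "powerpc64";

        /* Compare the first 7 characters, ignoring case. */
        if (strncasecmp(a, b, 7) == 0)
                printk(KERN_INFO "casecmp_demo: prefixes match\n");
        return 0;
}

static void __exit casecmp_demo_exit(void)
{
}

module_init(casecmp_demo_init);
module_exit(casecmp_demo_exit);
MODULE_LICENSE("GPL");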
From 7daa411b810d7eadfaabe3765ec5f827893dbb30 Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Wed, 12 Apr 2006 21:05:59 -0500
Subject: [PATCH] powerpc: IOMMU support for honoring dma_mask

Some devices don't support full 32-bit DMA address space, which we
currently assume. Add the required mask-passing to the IOMMU allocators.

Signed-off-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/iommu.c     | 36 ++++++++++++++++++++++++++----------
 arch/powerpc/kernel/pci_iommu.c | 40 ++++++++++++++++++++++++++++++++++++----
 arch/powerpc/kernel/vio.c       |  6 +++---
 3 files changed, 65 insertions(+), 17 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d9a7fde..4eba60a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -61,6 +61,7 @@ __setup("iommu=", setup_iommu);
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
+                                       unsigned long mask,
                                        unsigned int align_order)
 {
         unsigned long n, end, i, start;
@@ -97,9 +98,21 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
          */
         if (start >= limit)
                 start = largealloc ? tbl->it_largehint : tbl->it_hint;
-
+
  again:
 
+        if (limit + tbl->it_offset > mask) {
+                limit = mask - tbl->it_offset + 1;
+                /* If we're constrained on address range, first try
+                 * at the masked hint to avoid O(n) search complexity,
+                 * but on second pass, start at 0.
+                 */
+                if ((start & mask) >= limit || pass > 0)
+                        start = 0;
+                else
+                        start &= mask;
+        }
+
         n = find_next_zero_bit(tbl->it_map, limit, start);
 
         /* Align allocation */
@@ -150,14 +163,14 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                        unsigned int npages, enum dma_data_direction direction,
-                       unsigned int align_order)
+                       unsigned long mask, unsigned int align_order)
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
-
+
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
-        entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
 
         if (unlikely(entry == DMA_ERROR_CODE)) {
                 spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -236,7 +249,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
-                enum dma_data_direction direction)
+                unsigned long mask, enum dma_data_direction direction)
 {
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
@@ -274,7 +287,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 vaddr = (unsigned long)page_address(s->page) + s->offset;
                 npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
                 npages >>= PAGE_SHIFT;
-                entry = iommu_range_alloc(tbl, npages, &handle, 0);
+                entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
 
                 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -479,7 +492,8 @@ void iommu_free_table(struct device_node *dn)
  * byte within the page as vaddr.
  */
 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-                size_t size, enum dma_data_direction direction)
+                size_t size, unsigned long mask,
+                enum dma_data_direction direction)
 {
         dma_addr_t dma_handle = DMA_ERROR_CODE;
         unsigned long uaddr;
@@ -492,7 +506,8 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
         npages >>= PAGE_SHIFT;
 
         if (tbl) {
-                dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+                dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+                                         mask >> PAGE_SHIFT, 0);
                 if (dma_handle == DMA_ERROR_CODE) {
                         if (printk_ratelimit()) {
                                 printk(KERN_INFO "iommu_alloc failed, "
@@ -521,7 +536,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
+                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
 {
         void *ret = NULL;
         dma_addr_t mapping;
@@ -551,7 +566,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
         memset(ret, 0, size);
 
         /* Set up tces to cover the allocated range */
-        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
+                              mask >> PAGE_SHIFT, order);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);
                 ret = NULL;
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
index c336f3e..c1d95e1 100644
--- a/arch/powerpc/kernel/pci_iommu.c
+++ b/arch/powerpc/kernel/pci_iommu.c
@@ -59,6 +59,25 @@ static inline struct iommu_table *devnode_table(struct device *dev)
 }
 
 
+static inline unsigned long device_to_mask(struct device *hwdev)
+{
+        struct pci_dev *pdev;
+
+        if (!hwdev) {
+                pdev = ppc64_isabridge_dev;
+                if (!pdev) /* This is the best guess we can do */
+                        return 0xfffffffful;
+        } else
+                pdev = to_pci_dev(hwdev);
+
+        if (pdev->dma_mask)
+                return pdev->dma_mask;
+
+        /* Assume devices without mask can take 32 bit addresses */
+        return 0xfffffffful;
+}
+
+
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
@@ -67,7 +86,7 @@ static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag)
 {
         return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
-                        flag);
+                        device_to_mask(hwdev), flag);
 }
 
 static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
@@ -85,7 +104,8 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
 static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
                 size_t size, enum dma_data_direction direction)
 {
-        return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
+        return iommu_map_single(devnode_table(hwdev), vaddr, size,
+                                device_to_mask(hwdev), direction);
 }
 
 
@@ -100,7 +120,7 @@ static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
                 int nelems, enum dma_data_direction direction)
 {
         return iommu_map_sg(pdev, devnode_table(pdev), sglist,
-                        nelems, direction);
+                        nelems, device_to_mask(pdev), direction);
 }
 
 static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
@@ -112,7 +132,19 @@ static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
 /* We support DMA to/from any memory page via the iommu */
 static int pci_iommu_dma_supported(struct device *dev, u64 mask)
 {
-        return 1;
+        struct iommu_table *tbl = devnode_table(dev);
+
+        if (!tbl || tbl->it_offset > mask) {
+                printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
+                if (tbl)
+                        printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
+                                mask, tbl->it_offset);
+                else
+                        printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+                                mask);
+                return 0;
+        } else
+                return 1;
 }
 
 void pci_iommu_init(void)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 13c655b..971020c 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -202,7 +202,7 @@ static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
                           size_t size, enum dma_data_direction direction)
 {
         return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
-                        direction);
+                        ~0ul, direction);
 }
 
 static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
@@ -216,7 +216,7 @@ static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
                 int nelems, enum dma_data_direction direction)
 {
         return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
-                        nelems, direction);
+                        nelems, ~0ul, direction);
 }
 
 static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -229,7 +229,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag)
 {
         return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
-                        dma_handle, flag);
+                        dma_handle, ~0ul, flag);
 }
 
 static void vio_free_coherent(struct device *dev, size_t size,
--
cgit v1.1
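To see where the mask that device_to_mask() reads actually comes from: a PCI driver declares its addressing limit with pci_set_dma_mask(), and with the mask-passing above the platform's IOMMU allocators keep DMA windows below that limit instead of assuming the full 32-bit range. The fragment below is a hedged, hypothetical probe() excerpt; the function name is invented, and the 30-bit value mirrors the Airport Extreme case mentioned in the next patch.

/* Hypothetical driver fragment for a device that can only address 30 bits.
 * After this patch, iommu_range_alloc() keeps this device's mappings below
 * the 1GB boundary instead of anywhere in the 32-bit space. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        if (pci_enable_device(pdev))
                return -EIO;

        /* 30-bit DMA mask, i.e. the device can only address the low 1GB */
        if (pci_set_dma_mask(pdev, 0x3ffffffful)) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                return -EIO;
        }

        /* ... normal device setup continues here ... */
        return 0;
}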
From 28897731318dc8f63f683eed9091e446916ad706 Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Wed, 12 Apr 2006 21:52:33 -0500
Subject: [PATCH] powerpc: Lower threshold for DART enablement to 1GB

Turn on the DART already at 1GB. This is needed because some systems have
crippled devices, e.g. Airport Extreme cards, that only support 30-bit DMA
addresses. Otherwise, users with between 1 and 2GB of memory will need to
manually enable it with iommu=force, and that's no good.

Some simple performance tests show that there's a slight impact of enabling
DART, but it's in the 1-3% range (kernel build with disk I/O as well as
over NFS).

iommu=off can still be used for those who don't want to deal with the
overhead (and don't need it for any devices).

Signed-off-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/prom.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4336390..1cb69e8 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -62,7 +62,7 @@ static int __initdata dt_root_addr_cells;
 static int __initdata dt_root_size_cells;
 
 #ifdef CONFIG_PPC64
-static int __initdata iommu_is_off;
+int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
 #endif
--
cgit v1.1

From 23b2527d5eae89841eb66b46e80ec91980493dda Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 18 Apr 2006 15:24:16 +0200
Subject: [PATCH] powerpc/cell: remove BUILD_BUG_ON and add sys_tee to spu_syscall_table

Every time a new syscall gets added, a BUILD_BUG_ON in
arch/powerpc/platforms/cell/spu_callbacks.c gets triggered. Since the
addition of a new syscall is rather harmless, the error should just be
removed.

While we're here, add sys_tee to the list and add a comment to systbl.S
to remind people that there is another list on powerpc.

Signed-off-by: Arnd Bergmann
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/systbl.S | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index a14c964..8d15226 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -324,3 +324,8 @@ COMPAT_SYS(ppoll)
 SYSCALL(unshare)
 SYSCALL(splice)
 SYSCALL(tee)
+
+/*
+ * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
+ * as well when appropriate.
+ */
--
cgit v1.1
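For readers unfamiliar with the call wired up in the entry above: tee() duplicates data from one pipe into another without consuming it from the source, and splice() then moves the same data without copying it through user space. The program below is a hedged userspace illustration of the pair, close to the example in the tee(2) man page; it assumes a libc that already exposes tee() and splice() wrappers, otherwise the raw syscall() form would be needed.

/* Duplicate everything arriving on stdin (a pipe) to stdout (a pipe) while
 * also splicing it into a file -- the tee(1) idea, done in the kernel.
 * Usage (both ends must be pipes):  producer | ./ktee out.log | consumer
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        int fd;
        ssize_t len, slen;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        for (;;) {
                /* Duplicate pipe contents from stdin to stdout without
                 * consuming them... */
                len = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX, 0);
                if (len < 0) {
                        perror("tee");
                        return 1;
                }
                if (len == 0)
                        break;  /* writer closed its end of the pipe */

                /* ...then drain the same bytes from stdin into the file. */
                while (len > 0) {
                        slen = splice(STDIN_FILENO, NULL, fd, NULL, len, 0);
                        if (slen < 0) {
                                perror("splice");
                                return 1;
                        }
                        len -= slen;
                }
        }
        close(fd);
        return 0;
}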
From 912d35f86781e64d73be1ef358f703c08905ac37 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 26 Apr 2006 10:59:21 +0200
Subject: [PATCH] Add support for the sys_vmsplice syscall

sys_splice() moves data to/from pipes with a file input/output.
sys_vmsplice() moves data to a pipe, with the input being a user address
range instead.

This uses an approach suggested by Linus, where we can hold partial ranges
inside the pages[] map. Hopefully this will be useful for network receive
support as well.

Signed-off-by: Jens Axboe
---
 arch/powerpc/kernel/systbl.S | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 8d15226..0b98eea 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -324,6 +324,7 @@ COMPAT_SYS(ppoll)
 SYSCALL(unshare)
 SYSCALL(splice)
 SYSCALL(tee)
+SYSCALL(vmsplice)
 
 /*
  * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
--
cgit v1.1

From 649bbaa484bcdce94f40a1b97a6a2ded0549e8a2 Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman
Date: Mon, 24 Apr 2006 19:35:15 -0700
Subject: [PATCH] Remove __devinitdata from notifier block definitions

A few of the notifier_chain_register() callers use __devinitdata in the
definition of the notifier_block data structure. This is incorrect, as the
data structure needs to remain available after initialization (they do not
unregister the blocks during initialization).

This was leading to an oops when notifier_chain_register() was invoked for
those callback chains after initialization.

This patch fixes all such usages so that the notifier_block data structures
are _not_ placed in the init data section.

Signed-off-by: Chandra Seetharaman
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/sysfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 73560ef..0235eb7 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -297,7 +297,7 @@ static int __devinit sysfs_cpu_notify(struct notifier_block *self,
         return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata sysfs_cpu_nb = {
+static struct notifier_block sysfs_cpu_nb = {
         .notifier_call = sysfs_cpu_notify,
 };
 
--
cgit v1.1

From 83d722f7e198b034699b1500d98729beff930efd Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman
Date: Mon, 24 Apr 2006 19:35:21 -0700
Subject: [PATCH] Remove __devinit and __cpuinit from notifier_call definitions

A few of the notifier_chain_register() callers use __init in the definition
of notifier_call. This is incorrect, as the function definition needs to
remain available after initialization (they do not unregister the callbacks
during initialization).

This patch fixes all such usages so that the notifier_call functions are
_not_ placed in the __init section.

Signed-off-by: Chandra Seetharaman
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/sysfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0235eb7..ed737ca 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
                                       unsigned long action, void *hcpu)
 {
         unsigned int cpu = (unsigned int)(long)hcpu;
--
cgit v1.1
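To make the distinction drawn by these last two fixes concrete: the callback and the notifier_block stay referenced by the chain long after boot, so neither may carry an init-section annotation; only the code that performs the registration can. Below is a hedged sketch of a CPU-hotplug notifier written that way, using the 2.6-era register_cpu_notifier() interface; all names are illustrative.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Must NOT be __init/__devinit: it runs on every hotplug event. */
static int example_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                printk(KERN_INFO "example: cpu %u came online\n", cpu);
                break;
        case CPU_DEAD:
                printk(KERN_INFO "example: cpu %u went offline\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

/* Must NOT be __devinitdata: the chain keeps pointing at this block. */
static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_notify,
};

/* Only the registration itself may live in init code. */
static int __init example_register(void)
{
        register_cpu_notifier(&example_cpu_nb);
        return 0;
}
__initcall(example_register);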