Diffstat (limited to 'lib')
-rw-r--r-- | lib/Makefile        |   2
-rw-r--r-- | lib/find_next_bit.c |  22
-rw-r--r-- | lib/idr.c           |  10
-rw-r--r-- | lib/inflate.c       |   3
-rw-r--r-- | lib/iomap.c         |   2
-rw-r--r-- | lib/ratelimit.c     |  51
-rw-r--r-- | lib/swiotlb.c       | 149
7 files changed, 156 insertions, 83 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 2d7001b..0ae4eb0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o
+	 proportions.o prio_heap.o ratelimit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d3f5784..24c59de 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -20,8 +20,8 @@
 /*
  * Find the next set bit in a memory region.
  */
-unsigned long __find_next_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -58,14 +58,14 @@ found_first:
 found_middle:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_next_bit);
+EXPORT_SYMBOL(find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long __find_next_zero_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -102,15 +102,14 @@ found_first:
 found_middle:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 /*
  * Find the first set bit in a memory region.
  */
-unsigned long __find_first_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -131,13 +130,12 @@ unsigned long __find_first_bit(const unsigned long *addr,
 found:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_first_bit);
+EXPORT_SYMBOL(find_first_bit);
 
 /*
  * Find the first cleared bit in a memory region.
  */
-unsigned long __find_first_zero_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -158,7 +156,7 @@ unsigned long __find_first_zero_bit(const unsigned long *addr,
 found:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_first_zero_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
 
-static int init_id_cache(void)
+void __init idr_init_cache(void)
 {
-	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache",
-			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
-	return 0;
+	idr_layer_cache = kmem_cache_create("idr_layer_cache",
+				sizeof(struct idr_layer), 0, SLAB_PANIC,
+				idr_cache_ctor);
 }
 
 /**
@@ -602,7 +601,6 @@ static int init_id_cache(void)
  */
 void idr_init(struct idr *idp)
 {
-	init_id_cache();
 	memset(idp, 0, sizeof(struct idr));
 	spin_lock_init(&idp->lock);
 }
diff --git a/lib/inflate.c b/lib/inflate.c
index 845f91d..9762294 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -811,6 +811,9 @@ DEBG("<dyn");
   ll = malloc(sizeof(*ll) * (286+30));  /* literal/length and distance code lengths */
 #endif
 
+  if (ll == NULL)
+    return 1;
+
   /* make local bit buffer */
   b = bb;
   k = bk;
diff --git a/lib/iomap.c b/lib/iomap.c
index dd6ca48..37a3ea4 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap);
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
 	resource_size_t start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 0000000..485e304
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,51 @@
+/*
+ * ratelimit.c - Do something with rate limit.
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @ratelimit_jiffies: minimum time in jiffies between two callbacks
+ * @ratelimit_burst: number of callbacks we do before ratelimiting
+ *
+ * This enforces a rate limit: not more than @ratelimit_burst callbacks
+ * in every ratelimit_jiffies
+ */
+int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+{
+	static DEFINE_SPINLOCK(ratelimit_lock);
+	static unsigned toks = 10 * 5 * HZ;
+	static unsigned long last_msg;
+	static int missed;
+	unsigned long flags;
+	unsigned long now = jiffies;
+
+	spin_lock_irqsave(&ratelimit_lock, flags);
+	toks += now - last_msg;
+	last_msg = now;
+	if (toks > (ratelimit_burst * ratelimit_jiffies))
+		toks = ratelimit_burst * ratelimit_jiffies;
+	if (toks >= ratelimit_jiffies) {
+		int lost = missed;
+
+		missed = 0;
+		toks -= ratelimit_jiffies;
+		spin_unlock_irqrestore(&ratelimit_lock, flags);
+		if (lost)
+			printk(KERN_WARNING "%s: %d messages suppressed\n",
+				__func__, lost);
+		return 1;
+	}
+	missed++;
+	spin_unlock_irqrestore(&ratelimit_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(__ratelimit);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0259228..d568894 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
 
 #define OFFSET(val,align) ((unsigned long)	\
 	( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
-static inline unsigned int is_span_boundary(unsigned int index,
-					    unsigned int nslots,
-					    unsigned long offset_slots,
-					    unsigned long max_slots)
-{
-	unsigned long offset = (offset_slots + index) & (max_slots - 1);
-	return offset + nslots > max_slots;
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
-
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
+
+	do {
+		while (iommu_is_span_boundary(index, nslots, offset_slots,
+					      max_slots)) {
 			index += stride;
 			if (index >= io_tlb_nslabs)
 				index = 0;
-		} while (index != wrap);
+			if (index == wrap)
+				goto not_found;
+		}
 
-  not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
-  found:
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 
 	return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+			   size_t size, int dir, struct dma_attrs *attrs)
 {
 	char *dma_addr = bus_to_virt(dev_addr);
 
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			    SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
-				swiotlb_unmap_sg(hwdev, sgl, i, dir);
+				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+						       attrs);
 				sgl[0].dma_length = 0;
 				return 0;
 			}
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	}
 	return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+	       int dir)
+{
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+		       int nelems, int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 	}
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		 int dir)
+{
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
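
The find_next_bit.c hunks above rename the generic bitmap helpers so that find_first_bit()/find_next_bit() (and the _zero_bit variants) are defined and exported under those names directly. As a rough, hypothetical illustration only (not part of the patch), a caller might walk the set bits of a bitmap like this; the bitmap, its size and the printk are invented for the example:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Hypothetical example: report every set bit in a bitmap of 'nbits' bits. */
static void example_walk_bitmap(const unsigned long *bitmap, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(bitmap, nbits);
	     bit < nbits;
	     bit = find_next_bit(bitmap, nbits, bit + 1))
		printk(KERN_DEBUG "bit %lu is set\n", bit);
}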
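
Likewise, a minimal sketch of how the new __ratelimit() helper from lib/ratelimit.c might be called, assuming its prototype is visible to the caller; the 5-second interval and burst of 10 are example values, not anything mandated by the patch:

#include <linux/kernel.h>
#include <linux/jiffies.h>

/* Hypothetical caller: emit at most 10 warnings per 5-second window. */
static void example_report_error(int err)
{
	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "example: operation failed with error %d\n", err);
	/* When suppressed, __ratelimit() itself prints the missed count later. */
}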
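
Finally, a sketch of how the new swiotlb *_attrs entry points relate to the existing names: passing a NULL struct dma_attrs pointer keeps the old behaviour, which is exactly what the compatibility wrappers added by the patch do. The device, buffer and direction below are hypothetical, and the swiotlb prototypes are assumed to be in scope via the architecture's DMA/swiotlb headers:

#include <linux/dma-mapping.h>

/* Hypothetical caller that needs no special DMA attributes. */
static dma_addr_t example_map_buffer(struct device *hwdev, void *buf, size_t len)
{
	/* Equivalent to the old swiotlb_map_single(hwdev, buf, len, DMA_TO_DEVICE). */
	return swiotlb_map_single_attrs(hwdev, buf, len, DMA_TO_DEVICE, NULL);
}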