Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug               |  38
-rw-r--r-- | lib/bitmap.c                    |  89
-rw-r--r-- | lib/dec_and_lock.c              |  49
-rw-r--r-- | lib/find_next_bit.c             |   3
-rw-r--r-- | lib/radix-tree.c                | 143
-rw-r--r-- | lib/spinlock_debug.c            |  18
-rw-r--r-- | lib/swiotlb.c                   |   2
-rw-r--r-- | lib/zlib_deflate/deflate.c      |   6
-rw-r--r-- | lib/zlib_deflate/deflate_syms.c |   2
-rw-r--r-- | lib/zlib_inflate/infblock.c     |   4
-rw-r--r-- | lib/zlib_inflate/infblock.h     |   4
-rw-r--r-- | lib/zlib_inflate/inflate_syms.c |   2
-rw-r--r-- | lib/zlib_inflate/inflate_sync.c |   4
13 files changed, 198 insertions, 166 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 80598cf..a314e66 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,15 +9,9 @@ config PRINTK_TIME
 	  in kernel startup.
 
-config DEBUG_KERNEL
-	bool "Kernel debugging"
-	help
-	  Say Y here if you are developing drivers or trying to debug and
-	  identify kernel problems.
-
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
-	depends on DEBUG_KERNEL && !UML
+	depends on !UML
 	help
 	  If you say Y here, you will have some control over the system even
 	  if the system crashes for example during kernel debugging (e.g., you
@@ -29,6 +23,12 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config DEBUG_KERNEL
+	bool "Kernel debugging"
+	help
+	  Say Y here if you are developing drivers or trying to debug and
+	  identify kernel problems.
+
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
@@ -79,7 +79,7 @@ config SCHEDSTATS
 
 config DEBUG_SLAB
 	bool "Debug memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging, deadlock detection"
+	default y
+	depends on DEBUG_KERNEL
+	help
+	  This allows mutex semantics violations and mutex related deadlocks
+	  (lockups) to be detected and reported automatically.
+
 config DEBUG_SPINLOCK
 	bool "Spinlock debugging"
 	depends on DEBUG_KERNEL
@@ -187,6 +195,20 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers. If you don't
 	  debug the kernel, you can say N.
 
+config FORCED_INLINING
+	bool "Force gcc to inline functions marked 'inline'"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  disabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc4 to make the decision can
+	  become the default in the future, until then this option is there to
+	  test gcc for this.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
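For readers wondering what FORCED_INLINING actually changes: when the option is set, the kernel's compiler headers redefine the inline keywords so gcc may not ignore the request. A minimal sketch of the mechanism, assuming CONFIG_FORCED_INLINING is the controlling symbol (the real definitions live in the gcc compiler headers, e.g. include/linux/compiler-gcc.h):

	/* Sketch: with forced inlining enabled, 'inline' carries the
	 * always_inline attribute, turning a hint into a demand. */
	#ifdef CONFIG_FORCED_INLINING
	# define inline		inline		__attribute__((always_inline))
	# define __inline__	__inline__	__attribute__((always_inline))
	# define __inline	__inline	__attribute__((always_inline))
	#endif

With the option off, a gcc 4.x compiler is free to leave cold 'inline' functions out of line, which is where the smaller kernels mentioned in the help text come from.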
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 23d3b11..48e7083 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(bitmap_parselist);
  *
  * Map the bit at position @pos in @buf (of length @bits) to the
  * ordinal of which set bit it is.  If it is not set or if @pos
- * is not a valid bit position, map to zero (0).
+ * is not a valid bit position, map to -1.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @pos
  * values 4 through 7 will get mapped to 0 through 3, respectively,
@@ -531,18 +531,19 @@ EXPORT_SYMBOL(bitmap_parselist);
  */
 static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 {
-	int ord = 0;
+	int i, ord;
 
-	if (pos >= 0 && pos < bits) {
-		int i;
+	if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+		return -1;
 
-		for (i = find_first_bit(buf, bits);
-		     i < pos;
-		     i = find_next_bit(buf, bits, i + 1))
-			ord++;
-		if (i > pos)
-			ord = 0;
+	i = find_first_bit(buf, bits);
+	ord = 0;
+	while (i < pos) {
+		i = find_next_bit(buf, bits, i + 1);
+		ord++;
 	}
+
+	BUG_ON(i != pos);
+
 	return ord;
 }
 
@@ -553,11 +554,12 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  * @bits: number of valid bit positions in @buf
  *
  * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @ord
  * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord valuds will get mapped to 0.  When @ord value 3
+ * and all other @ord values return undefined values.  When @ord value 3
  * gets mapped to (returns) @pos value 7 in this example, that means
  * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
  *
@@ -583,8 +585,8 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 
 /**
  * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
- *	@src: subset to be remapped
  *	@dst: remapped result
+ *	@src: subset to be remapped
  *	@old: defines domain of map
  *	@new: defines range of map
  *	@bits: number of bits in each of these bitmaps
@@ -596,49 +598,42 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
  * weight of @old, map the position of the n-th set bit in @old to
  * the position of the m-th set bit in @new, where m == n % w.
  *
- * If either of the @old and @new bitmaps are empty, or if @src and @dst
- * point to the same location, then this routine does nothing.
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
  *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identify map).
  *
  * Apply the above specified mapping to @src, placing the result in
  * @dst, clearing any bits previously set in @dst.
  *
- * The resulting value of @dst will have either the same weight as
- * @src, or less weight in the general case that the mapping wasn't
- * injective due to the weight of @new being less than that of @old.
- * The resulting value of @dst will never have greater weight than
- * that of @src, except perhaps in the case that one of the above
- * conditions was not met and this routine just returned.
- *
  * For example, lets say that @old has bits 4 through 7 set, and
  * @new has bits 12 through 15 set.  This defines the mapping of bit
  * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @src
- * comes into this routine with bits 1, 5 and 7 set, then @dst should
- * leave with bits 12, 13 and 15 set.
+ * bit positions unchanged.  So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
  */
 void bitmap_remap(unsigned long *dst, const unsigned long *src,
 		const unsigned long *old, const unsigned long *new,
 		int bits)
 {
-	int s;
+	int oldbit, w;
 
-	if (bitmap_weight(old, bits) == 0)
-		return;
-	if (bitmap_weight(new, bits) == 0)
-		return;
 	if (dst == src)		/* following doesn't handle inplace remaps */
 		return;
 	bitmap_zero(dst, bits);
 
-	for (s = find_first_bit(src, bits);
-	     s < bits;
-	     s = find_next_bit(src, bits, s + 1)) {
-		int x = bitmap_pos_to_ord(old, s, bits);
-		int y = bitmap_ord_to_pos(new, x, bits);
-		set_bit(y, dst);
+	w = bitmap_weight(new, bits);
+	for (oldbit = find_first_bit(src, bits);
+	     oldbit < bits;
+	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+		int n = bitmap_pos_to_ord(old, oldbit, bits);
+		if (n < 0 || w == 0)
+			set_bit(oldbit, dst);	/* identity map */
+		else
+			set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
 	}
 }
 EXPORT_SYMBOL(bitmap_remap);
@@ -657,8 +652,8 @@ EXPORT_SYMBOL(bitmap_remap);
  * weight of @old, map the position of the n-th set bit in @old to
  * the position of the m-th set bit in @new, where m == n % w.
  *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identify map).
  *
  * Apply the above specified mapping to bit position @oldbit, returning
  * the new bit position.
@@ -666,14 +661,18 @@ EXPORT_SYMBOL(bitmap_remap);
  * For example, lets say that @old has bits 4 through 7 set, and
  * @new has bits 12 through 15 set.  This defines the mapping of bit
  * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @oldbit
- * is 5, then this routine returns 13.
+ * bit positions unchanged.  So if say @oldbit is 5, then this routine
+ * returns 13.
  */
 int bitmap_bitremap(int oldbit, const unsigned long *old,
 				const unsigned long *new, int bits)
 {
-	int x = bitmap_pos_to_ord(old, oldbit, bits);
-	return bitmap_ord_to_pos(new, x, bits);
+	int w = bitmap_weight(new, bits);
+	int n = bitmap_pos_to_ord(old, oldbit, bits);
+	if (n < 0 || w == 0)
+		return oldbit;
+	else
+		return bitmap_ord_to_pos(new, n % w, bits);
 }
 EXPORT_SYMBOL(bitmap_bitremap);
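To make the new identity-map semantics concrete, here is a small illustrative function exercising exactly the example from the kernel-doc above. bitmap_remap(), DECLARE_BITMAP() and set_bit() are the real APIs; the wrapper function itself is hypothetical and not part of the patch:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	/* Sketch: src has bits 1, 5 and 7 set; old = {4..7}, new = {12..15}. */
	static void bitmap_remap_example(void)
	{
		DECLARE_BITMAP(src, 32);
		DECLARE_BITMAP(dst, 32);
		DECLARE_BITMAP(old, 32);
		DECLARE_BITMAP(new, 32);
		int b;

		bitmap_zero(src, 32);
		bitmap_zero(dst, 32);
		bitmap_zero(old, 32);
		bitmap_zero(new, 32);

		for (b = 4; b <= 7; b++)
			set_bit(b, old);	/* domain of the map: bits 4..7 */
		for (b = 12; b <= 15; b++)
			set_bit(b, new);	/* range of the map: bits 12..15 */
		set_bit(1, src);
		set_bit(5, src);
		set_bit(7, src);

		bitmap_remap(dst, src, old, new, 32);
		/* dst now has bits 1 (identity: 1 is not in old), 13 and 15 set;
		 * before this patch, bit 1 would have been sent to 12 instead. */
	}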
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a966..a65c314 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
  *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
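The new fast path depends on atomic_add_unless(), which performs the add only when the counter does not hold a forbidden value. A sketch of the cmpxchg-style loop such a primitive is commonly built from (illustrative only; the real definition is per-architecture, and the function name here is hypothetical):

	#include <asm/atomic.h>

	/* Sketch: add 'a' to 'v' unless 'v' currently holds 'u'.
	 * Returns non-zero if the add was performed.  With a == -1 and
	 * u == 1, the count can never be taken from 1 to 0 on this path,
	 * which is exactly what _atomic_dec_and_lock() needs: the final
	 * decrement to zero must happen under the lock. */
	static inline int sketch_atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;	/* lost a race; retry with the new value */
		return c != u;
	}

This is why the #ifdef CONFIG_SMP guard is safe: on UP the spin_lock() is free, so the unconditional slow path costs nothing.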
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d08302d..c05b4b1 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/module.h>
 
 int find_next_bit(const unsigned long *addr, int size, int offset)
 {
@@ -53,3 +54,5 @@ int find_next_bit(const unsigned long *addr, int size, int offset)
 
 	return offset;
 }
+
+EXPORT_SYMBOL(find_next_bit);
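With find_next_bit() now exported, modules can use the standard set-bit iteration idiom that this patch also relies on in bitmap.c and radix-tree.c. A minimal sketch (the helper name is hypothetical):

	#include <linux/bitops.h>

	/* Sketch: visit every set bit in a bitmap of 'size' bits. */
	static void visit_set_bits(const unsigned long *addr, int size)
	{
		int bit;

		for (bit = find_first_bit(addr, size);
		     bit < size;
		     bit = find_next_bit(addr, size, bit + 1)) {
			/* 'bit' is the index of the next set bit */
		}
	}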
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 88511c3..c0bd4a9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -137,18 +137,31 @@ out:
 
 static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
 {
-	if (!test_bit(offset, &node->tags[tag][0]))
-		__set_bit(offset, &node->tags[tag][0]);
+	__set_bit(offset, node->tags[tag]);
 }
 
 static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
 {
-	__clear_bit(offset, &node->tags[tag][0]);
+	__clear_bit(offset, node->tags[tag]);
 }
 
 static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
 {
-	return test_bit(offset, &node->tags[tag][0]);
+	return test_bit(offset, node->tags[tag]);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
 }
 
 /*
@@ -185,15 +198,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * into the newly-pushed top-level node(s)
 	 */
 	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-		int idx;
-
 		tags[tag] = 0;
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (root->rnode->tags[tag][idx]) {
-				tags[tag] = 1;
-				break;
-			}
-		}
+		if (any_tag_set(root->rnode, tag))
+			tags[tag] = 1;
 	}
 
 	do {
@@ -246,7 +253,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	offset = 0;			/* uninitialised var warning */
-	while (height > 0) {
+	do {
 		if (slot == NULL) {
 			/* Have to add a child node.  */
 			if (!(slot = radix_tree_node_alloc(root)))
@@ -264,18 +271,16 @@ int radix_tree_insert(struct radix_tree_root *root,
 		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
-	}
+	} while (height > 0);
 
 	if (slot != NULL)
 		return -EEXIST;
 
-	if (node) {
-		node->count++;
-		node->slots[offset] = item;
-		BUG_ON(tag_get(node, 0, offset));
-		BUG_ON(tag_get(node, 1, offset));
-	} else
-		root->rnode = item;
+	BUG_ON(!node);
+	node->count++;
+	node->slots[offset] = item;
+	BUG_ON(tag_get(node, 0, offset));
+	BUG_ON(tag_get(node, 1, offset));
 
 	return 0;
 }
@@ -367,7 +372,8 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(slot, tag, offset);
+		if (!tag_get(slot, tag, offset))
+			tag_set(slot, tag, offset);
 		slot = slot->slots[offset];
 		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
@@ -427,13 +433,11 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		int idx;
-
+		if (!tag_get(pathp->node, tag, pathp->offset))
+			goto out;
 		tag_clear(pathp->node, tag, pathp->offset);
-
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp->node->tags[tag][idx])
-				goto out;
-		}
+		if (any_tag_set(pathp->node, tag))
+			goto out;
 		pathp--;
 	} while (pathp->node);
 out:
@@ -674,6 +678,29 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
 
 /**
+ *	radix_tree_shrink    -    shrink height of a radix tree to minimal
+ *	@root		radix tree root
+ */
+static inline void radix_tree_shrink(struct radix_tree_root *root)
+{
+	/* try to shrink tree height */
+	while (root->height > 1 &&
+			root->rnode->count == 1 &&
+			root->rnode->slots[0]) {
+		struct radix_tree_node *to_free = root->rnode;
+
+		root->rnode = to_free->slots[0];
+		root->height--;
+		/* must only free zeroed nodes into the slab */
+		tag_clear(to_free, 0, 0);
+		tag_clear(to_free, 1, 0);
+		to_free->slots[0] = NULL;
+		to_free->count = 0;
+		radix_tree_node_free(to_free);
+	}
+}
+
+/**
  *	radix_tree_delete    -    delete an item from a radix tree
  *	@root:		radix tree root
  *	@index:		index key
@@ -691,6 +718,8 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
 	int nr_cleared_tags;
+	int tag;
+	int offset;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
@@ -701,16 +730,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	slot = root->rnode;
 
 	for ( ; height > 0; height--) {
-		int offset;
-
 		if (slot == NULL)
 			goto out;
 
+		pathp++;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp[1].offset = offset;
-		pathp[1].node = slot;
+		pathp->offset = offset;
+		pathp->node = slot;
 		slot = slot->slots[offset];
-		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 	}
 
@@ -723,35 +750,39 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	/*
 	 * Clear all tags associated with the just-deleted item
 	 */
-	memset(tags, 0, sizeof(tags));
-	do {
-		int tag;
+	nr_cleared_tags = 0;
+	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		if (tag_get(pathp->node, tag, pathp->offset)) {
+			tag_clear(pathp->node, tag, pathp->offset);
+			tags[tag] = 0;
+			nr_cleared_tags++;
+		} else
+			tags[tag] = 1;
+	}
 
-		nr_cleared_tags = RADIX_TREE_TAGS;
+	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
 		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-			int idx;
-
 			if (tags[tag])
 				continue;
 
 			tag_clear(pathp->node, tag, pathp->offset);
-
-			for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-				if (pathp->node->tags[tag][idx]) {
-					tags[tag] = 1;
-					nr_cleared_tags--;
-					break;
-				}
+			if (any_tag_set(pathp->node, tag)) {
+				tags[tag] = 1;
+				nr_cleared_tags--;
 			}
 		}
-		pathp--;
-	} while (pathp->node && nr_cleared_tags);
+	}
 
 	/* Now free the nodes we do not need anymore */
 	for (pathp = orig_pathp; pathp->node; pathp--) {
 		pathp->node->slots[pathp->offset] = NULL;
-		if (--pathp->node->count)
+		pathp->node->count--;
+
+		if (pathp->node->count) {
+			if (pathp->node == root->rnode)
+				radix_tree_shrink(root);
 			goto out;
+		}
 
 		/* Node with zero slots in use so free it */
 		radix_tree_node_free(pathp->node);
@@ -770,15 +801,11 @@ EXPORT_SYMBOL(radix_tree_delete);
  */
 int radix_tree_tagged(struct radix_tree_root *root, int tag)
 {
-	int idx;
-
-	if (!root->rnode)
-		return 0;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (root->rnode->tags[tag][idx])
-			return 1;
-	}
-	return 0;
+	struct radix_tree_node *rnode;
+	rnode = root->rnode;
+	if (!rnode)
+		return 0;
+	return any_tag_set(rnode, tag);
 }
 EXPORT_SYMBOL(radix_tree_tagged);
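radix_tree_shrink() keeps lookups cheap after deletions: whenever the root node has exactly one occupied slot and it is slot 0, one level can be peeled off the top. A hypothetical usage sequence showing when it fires (radix_tree_insert()/radix_tree_delete() are the real APIs from this file; error handling omitted):

	#include <linux/radix-tree.h>

	/* Sketch: growing the tree for one large index and then deleting
	 * that index leaves a tall, mostly-empty tree; the shrink call in
	 * radix_tree_delete() collapses it back to minimal height. */
	static void shrink_example(struct radix_tree_root *root, void *item)
	{
		radix_tree_insert(root, 0, item);	/* fits in a height-1 tree */
		radix_tree_insert(root, 0x10000, item);	/* forces extra levels */
		radix_tree_delete(root, 0x10000);	/* only index 0 remains... */
		/* ...so the extra levels above slot 0 are freed and lookups
		 * of index 0 again reach the item in a single step */
	}

Note the "must only free zeroed nodes into the slab" comment: the node cache relies on a constructor, so a node must be returned in the same clean state it was handed out in.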
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index dcd4be9..c8bb8cc 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -19,10 +19,11 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 	if (xchg(&print_once, 0)) {
 		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
 			owner = lock->owner;
-		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 			msg, raw_smp_processor_id(),
 			current->comm, current->pid);
-		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+				".owner_cpu: %d\n",
 			lock, lock->magic,
 			owner ? owner->comm : "<none>",
 			owner ? owner->pid : -1,
@@ -78,7 +79,8 @@ static void __spin_lock_debug(spinlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -120,8 +122,8 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 	static long print_once = 1;
 
 	if (xchg(&print_once, 0)) {
-		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
-			raw_smp_processor_id(), current->comm,
+		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+			msg, raw_smp_processor_id(), current->comm,
 			current->pid, lock);
 		dump_stack();
 #ifdef CONFIG_SMP
@@ -149,7 +151,8 @@ static void __read_lock_debug(rwlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -221,7 +224,8 @@ static void __write_lock_debug(rwlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
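Two things happen in each of these hunks: the KERN_EMERG prefix is added so lock-debugging output reaches the console even under a restrictive loglevel, and the long formats are split using C's adjacent-string-literal concatenation, which changes only the source layout, not the emitted message. A minimal illustration (the wrapper function is hypothetical; printk, KERN_EMERG and raw_smp_processor_id() are the real APIs):

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/smp.h>

	static void emerg_example(void)
	{
		/* The two literals concatenate at compile time: one message,
		 * one KERN_EMERG prefix, and no newline added by the split. */
		printk(KERN_EMERG "BUG: example report on CPU#%d, "
				"%s/%d\n",
			raw_smp_processor_id(), current->comm, current->pid);
	}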
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 3b48205..0af497b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -463,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 */
 		dma_addr_t handle;
 		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
 		ret = phys_to_virt(handle);
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index ad9a1bf..1653dd9 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -255,6 +255,7 @@ int zlib_deflateInit2_(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateSetDictionary(
 	z_streamp strm,
 	const Byte *dictionary,
@@ -297,6 +298,7 @@ int zlib_deflateSetDictionary(
     if (hash_head) hash_head = 0;  /* to make compiler happy */
     return Z_OK;
 }
+#endif  /*  0  */
 
 /* ========================================================================= */
 int zlib_deflateReset(
@@ -330,6 +332,7 @@ int zlib_deflateReset(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateParams(
 	z_streamp strm,
 	int level,
@@ -365,6 +368,7 @@ int zlib_deflateParams(
     s->strategy = strategy;
     return err;
 }
+#endif  /*  0  */
 
 /* =========================================================================
  * Put a short in the pending buffer. The 16-bit value is put in MSB order.
@@ -572,6 +576,7 @@ int zlib_deflateEnd(
 /* =========================================================================
  * Copy the source state to the destination state.
  */
+#if 0
 int zlib_deflateCopy (
 	z_streamp dest,
 	z_streamp source
@@ -624,6 +629,7 @@ int zlib_deflateCopy (
     return Z_OK;
 #endif
 }
+#endif  /*  0  */
 
 /* ===========================================================================
  * Read a new buffer from the current input stream, update the adler32
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 5985b28..767b573 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -16,6 +16,4 @@ EXPORT_SYMBOL(zlib_deflateInit_);
 EXPORT_SYMBOL(zlib_deflateInit2_);
 EXPORT_SYMBOL(zlib_deflateEnd);
 EXPORT_SYMBOL(zlib_deflateReset);
-EXPORT_SYMBOL(zlib_deflateCopy);
-EXPORT_SYMBOL(zlib_deflateParams);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
index 50f21ca4..c16cdef 100644
--- a/lib/zlib_inflate/infblock.c
+++ b/lib/zlib_inflate/infblock.c
@@ -338,6 +338,7 @@ int zlib_inflate_blocks_free(
 }
 
 
+#if 0
 void zlib_inflate_set_dictionary(
 	inflate_blocks_statef *s,
 	const Byte *d,
@@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary(
   memcpy(s->window, d, n);
   s->read = s->write = s->window + n;
 }
+#endif  /*  0  */
 
 
 /* Returns true if inflate is currently at the end of a block generated
  * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
  * IN assertion: s != NULL
  */
+#if 0
 int zlib_inflate_blocks_sync_point(
 	inflate_blocks_statef *s
 )
 {
   return s->mode == LENS;
 }
+#endif  /*  0  */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
index f5221dd..ceee60b 100644
--- a/lib/zlib_inflate/infblock.h
+++ b/lib/zlib_inflate/infblock.h
@@ -33,12 +33,16 @@ extern int zlib_inflate_blocks_free (
 	inflate_blocks_statef *,
 	z_streamp);
 
+#if 0
 extern void zlib_inflate_set_dictionary (
 	inflate_blocks_statef *s,
 	const Byte *d,  /* dictionary */
 	uInt  n);       /* dictionary length */
+#endif  /*  0  */
 
+#if 0
 extern int zlib_inflate_blocks_sync_point (
 	inflate_blocks_statef *s);
+#endif  /*  0  */
 
 #endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index aa1b081..ef49738 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -15,8 +15,6 @@ EXPORT_SYMBOL(zlib_inflate);
 EXPORT_SYMBOL(zlib_inflateInit_);
 EXPORT_SYMBOL(zlib_inflateInit2_);
 EXPORT_SYMBOL(zlib_inflateEnd);
-EXPORT_SYMBOL(zlib_inflateSync);
 EXPORT_SYMBOL(zlib_inflateReset);
-EXPORT_SYMBOL(zlib_inflateSyncPoint);
 EXPORT_SYMBOL(zlib_inflateIncomp);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
index e07bdb2..61411ff 100644
--- a/lib/zlib_inflate/inflate_sync.c
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -7,6 +7,7 @@
 #include "infblock.h"
 #include "infutil.h"
 
+#if 0
 int zlib_inflateSync(
 	z_streamp z
 )
@@ -57,6 +58,7 @@ int zlib_inflateSync(
   z->state->mode = BLOCKS;
   return Z_OK;
 }
+#endif  /*  0  */
 
 
 /* Returns true if inflate is currently at the end of a block generated
@@ -66,6 +68,7 @@ int zlib_inflateSync(
  * decompressing, PPP checks that at the end of input packet, inflate is
  * waiting for these length bytes.
  */
+#if 0
 int zlib_inflateSyncPoint(
 	z_streamp z
 )
@@ -74,6 +77,7 @@ int zlib_inflateSyncPoint(
 		return Z_STREAM_ERROR;
 	return zlib_inflate_blocks_sync_point(z->state->blocks);
 }
+#endif  /*  0  */
 
 /*
  * This subroutine adds the data at next_in/avail_in to the output history