author    Ingo Molnar <mingo@elte.hu>  2009-06-17 12:52:15 +0200
committer Ingo Molnar <mingo@elte.hu>  2009-06-17 12:56:49 +0200
commit    eadb8a091b27a840de7450f84ecff5ef13476424 (patch)
tree      58c3782d40def63baa8167f3d31e3048cb4c7660 /lib
parent    73874005cd8800440be4299bd095387fff4b90ac (diff)
parent    65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/traps.c
	arch/x86/power/cpu.c
	arch/x86/power/cpu_32.c
	kernel/Makefile

Semantic conflict:
	arch/x86/kernel/hw_breakpoint.c

Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            |    9
-rw-r--r--  lib/Kconfig.debug      |   38
-rw-r--r--  lib/Kconfig.kmemcheck  |   91
-rw-r--r--  lib/Makefile           |    5
-rw-r--r--  lib/atomic64.c         |  175
-rw-r--r--  lib/checksum.c         |  193
-rw-r--r--  lib/cpumask.c          |   23
-rw-r--r--  lib/dec_and_lock.c     |    3
-rw-r--r--  lib/dma-debug.c        |  432
-rw-r--r--  lib/extable.c          |   21
-rw-r--r--  lib/genalloc.c         |    1
-rw-r--r--  lib/hexdump.c          |   15
-rw-r--r--  lib/kobject.c          |    7
-rw-r--r--  lib/radix-tree.c       |  110
-rw-r--r--  lib/rational.c         |   62
-rw-r--r--  lib/rbtree.c           |   34
-rw-r--r--  lib/swiotlb.c          |  119
-rw-r--r--  lib/vsprintf.c         |   56
18 files changed, 1195 insertions(+), 199 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 8ade0a7..bb1326d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,9 @@ menu "Library routines"
config BITREVERSE
tristate
+config RATIONAL
+ boolean
+
config GENERIC_FIND_FIRST_BIT
bool
@@ -191,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
config NLATTR
bool
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6cdcf38..6b0c2d8a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
config DEBUG_SLAB
bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB
+ depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
help
Say Y here to have the kernel do limited verification on memory
allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG
+ depends on SLUB && SLUB_DEBUG && !KMEMCHECK
default n
help
Boot with debugging on by default. SLUB boots by default with
@@ -336,6 +336,38 @@ config SLUB_STATS
out which slabs are relevant to a particular load.
Try running: slabinfo -DA
+config DEBUG_KMEMLEAK
+ bool "Kernel memory leak detector"
+ depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+ !MEMORY_HOTPLUG
+ select DEBUG_SLAB if SLAB
+ select SLUB_DEBUG if SLUB
+ select DEBUG_FS if SYSFS
+ select STACKTRACE if STACKTRACE_SUPPORT
+ select KALLSYMS
+ help
+ Say Y here if you want to enable the memory leak
+ detector. The memory allocation/freeing is traced in a way
+ similar to Boehm's conservative garbage collector, the
+ difference being that the orphan objects are not freed but
+ only shown in /sys/kernel/debug/kmemleak. Enabling this
+ feature will introduce an overhead to memory
+ allocations. See Documentation/kmemleak.txt for more
+ details.
+
+ In order to access the kmemleak file, debugfs needs to be
+ mounted (usually at /sys/kernel/debug).
+
+config DEBUG_KMEMLEAK_TEST
+ tristate "Simple test for the kernel memory leak detector"
+ depends on DEBUG_KMEMLEAK
+ help
+ Say Y or M here to build a test for the kernel memory leak
+ detector. This option enables a module that explicitly leaks
+ memory.
+
+ If unsure, say N.
+
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
@@ -964,3 +996,5 @@ config DMA_API_DEBUG
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 0000000..603c81b
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+ bool
+
+menuconfig KMEMCHECK
+ bool "kmemcheck: trap use of uninitialized memory"
+ depends on DEBUG_KERNEL
+ depends on !X86_USE_3DNOW
+ depends on SLUB || SLAB
+ depends on !CC_OPTIMIZE_FOR_SIZE
+ depends on !FUNCTION_TRACER
+ select FRAME_POINTER
+ select STACKTRACE
+ default n
+ help
+ This option enables tracing of dynamically allocated kernel memory
+ to see if memory is used before it has been given an initial value.
+ Be aware that this requires half of your memory for bookkeeping and
+ will insert extra code at *every* read and write to tracked memory
+ thus slowing down the kernel code (but user code is unaffected).
+
+ The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+ or enable kmemcheck at boot-time. If the kernel is started with
+ kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+ prompt "kmemcheck: default mode at boot"
+ depends on KMEMCHECK
+ default KMEMCHECK_ONESHOT_BY_DEFAULT
+ help
+ This option controls the default behaviour of kmemcheck when the
+ kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+ bool "disabled"
+ depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+ bool "enabled"
+ depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+ bool "one-shot"
+ depends on KMEMCHECK
+ help
+ In one-shot mode, only the first error detected is reported before
+ kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+ int "kmemcheck: error queue size"
+ depends on KMEMCHECK
+ default 64
+ help
+ Select the maximum number of errors to store in the queue. Since
+ errors can occur virtually anywhere and in any context, we need a
+ temporary storage area which is guaranteed not to generate any
+ other faults. The queue will be emptied as soon as a tasklet may
+ be scheduled. If the queue is full, new error reports will be
+ lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+ int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+ depends on KMEMCHECK
+ range 2 8
+ default 5
+ help
+ Select the number of shadow bytes to save along with each entry of
+ the queue. These bytes indicate what parts of an allocation are
+ initialized, uninitialized, etc. and will be displayed when an
+ error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+ bool "kmemcheck: allow partially uninitialized memory"
+ depends on KMEMCHECK
+ default y
+ help
+ This option works around certain GCC optimizations that produce
+ 32-bit reads from 16-bit variables where the upper 16 bits are
+ thrown away afterwards. This may of course also hide some real
+ bugs.
+
+config KMEMCHECK_BITOPS_OK
+ bool "kmemcheck: allow bit-field manipulation"
+ depends on KMEMCHECK
+ default n
+ help
+ This option silences warnings that would be generated for bit-field
+ accesses where not all the bits are initialized at the same time.
+ This may also hide some real bugs.
+
diff --git a/lib/Makefile b/lib/Makefile
index 33a40e4..8e9bcf9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
endif
obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -92,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o
obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
+obj-$(CONFIG_GENERIC_CSUM) += checksum.o
+
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 0000000..c5e7255
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable. Since this is expected to used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS 16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+ spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+ unsigned long addr = (unsigned long) v;
+
+ addr >>= L1_CACHE_SHIFT;
+ addr ^= (addr >> 8) ^ (addr >> 16);
+ return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+
+ spin_lock_irqsave(lock, flags);
+ v->counter = i;
+ spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+
+ spin_lock_irqsave(lock, flags);
+ v->counter += a;
+ spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter += a;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+
+ spin_lock_irqsave(lock, flags);
+ v->counter -= a;
+ spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter -= a;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter - 1;
+ if (val >= 0)
+ v->counter = val;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val == o)
+ v->counter = n;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ v->counter = new;
+ spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
+ int ret = 1;
+
+ spin_lock_irqsave(lock, flags);
+ if (v->counter != u) {
+ v->counter += a;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(lock, flags);
+ return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+ int i;
+
+ for (i = 0; i < NR_LOCKS; ++i)
+ spin_lock_init(&atomic64_lock[i].lock);
+ return 0;
+}
+
+pure_initcall(init_atomic64_lock);
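The file above provides the whole atomic64_t API behind a small hashed array of spinlocks, so callers use it exactly as they would the native 64-bit atomics. A minimal, hypothetical usage sketch follows; the caller names are invented and it assumes an architecture that selects CONFIG_GENERIC_ATOMIC64 so that asm/atomic.h pulls in the generic atomic64 declarations (including ATOMIC64_INIT) from this series:

/* Hypothetical 64-bit statistics counter on a 32-bit machine;
 * not part of this commit, just an illustration of the API. */
#include <linux/types.h>
#include <asm/atomic.h>

static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static void account_io(size_t len)
{
	/* serialized internally by the hashed spinlock in lib/atomic64.c */
	atomic64_add(len, &bytes_transferred);
}

static long long snapshot_and_reset(void)
{
	/* xchg gives a consistent read-and-clear in one locked section */
	return atomic64_xchg(&bytes_transferred, 0);
}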
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 0000000..12e5a1c
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,193 @@
+/*
+ *
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * IP/TCP/UDP checksumming routines
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Tom May, <ftom@netcom.com>
+ * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ * Lots of code moved from tcp.c and ip.c; see those files
+ * for more names.
+ *
+ * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
+ * Fixed some nasty bugs, causing some horrible crashes.
+ * A: At some points, the sum (%0) was used as
+ * length-counter instead of the length counter
+ * (%1). Thanks to Roman Hodek for pointing this out.
+ * B: GCC seems to mess up if one uses too many
+ * data-registers to hold input values and one tries to
+ * specify d0 and d1 as scratch registers. Letting gcc
+ * choose these registers itself solves the problem.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
+ kills, so most of the assembly has to go. */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from32to16(unsigned long x)
+{
+ /* add up 16-bit and 16-bit for 16+c bit */
+ x = (x & 0xffff) + (x >> 16);
+ /* add up carry.. */
+ x = (x & 0xffff) + (x >> 16);
+ return x;
+}
+
+static unsigned int do_csum(const unsigned char *buff, int len)
+{
+ int odd, count;
+ unsigned long result = 0;
+
+ if (len <= 0)
+ goto out;
+ odd = 1 & (unsigned long) buff;
+ if (odd) {
+ result = *buff;
+ len--;
+ buff++;
+ }
+ count = len >> 1; /* nr of 16-bit words.. */
+ if (count) {
+ if (2 & (unsigned long) buff) {
+ result += *(unsigned short *) buff;
+ count--;
+ len -= 2;
+ buff += 2;
+ }
+ count >>= 1; /* nr of 32-bit words.. */
+ if (count) {
+ unsigned long carry = 0;
+ do {
+ unsigned long w = *(unsigned long *) buff;
+ count--;
+ buff += 4;
+ result += carry;
+ result += w;
+ carry = (w > result);
+ } while (count);
+ result += carry;
+ result = (result & 0xffff) + (result >> 16);
+ }
+ if (len & 2) {
+ result += *(unsigned short *) buff;
+ buff += 2;
+ }
+ }
+ if (len & 1)
+ result += (*buff << 8);
+ result = from32to16(result);
+ if (odd)
+ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+ return result;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return (__force __sum16)~do_csum(iph, ihl*4);
+}
+EXPORT_SYMBOL(ip_fast_csum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum wsum)
+{
+ unsigned int sum = (__force unsigned int)wsum;
+ unsigned int result = do_csum(buff, len);
+
+ /* add in old sum, and carry.. */
+ result += sum;
+ if (sum > result)
+ result += 1;
+ return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+__sum16 ip_compute_csum(const void *buff, int len)
+{
+ return (__force __sum16)~do_csum(buff, len);
+}
+EXPORT_SYMBOL(ip_compute_csum);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *csum_err)
+{
+ int missing;
+
+ missing = __copy_from_user(dst, src, len);
+ if (missing) {
+ memset(dst + len - missing, 0, missing);
+ *csum_err = -EFAULT;
+ } else
+ *csum_err = 0;
+
+ return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy);
+
+#ifndef csum_tcpudp_nofold
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ s += (s >> 32);
+ return (__force __wsum)s;
+}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
+#endif
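The helpers above accumulate a 32-bit partial sum and then fold the carries back into 16 bits before inverting. That fold can be shown in isolation; the following is a stand-alone user-space C illustration with a made-up word buffer, not kernel code and not part of this commit:

/* Illustration of the 32-bit -> 16-bit ones'-complement fold used by
 * do_csum()/from32to16(); the words below are arbitrary. */
#include <stdio.h>
#include <stdint.h>

static uint16_t fold32to16(uint32_t x)
{
	x = (x & 0xffff) + (x >> 16);	/* fold upper half into lower half */
	x = (x & 0xffff) + (x >> 16);	/* fold any remaining carry */
	return (uint16_t)x;
}

int main(void)
{
	const uint16_t words[] = { 0x4500, 0x0073, 0x0000, 0x4000,
				   0x4011, 0x0000, 0xc0a8, 0x0001 };
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];

	/* fold-and-invert is the same final step ip_compute_csum() performs */
	printf("folded checksum = 0x%04x\n",
	       (unsigned int)(uint16_t)~fold32to16(sum));
	return 0;
}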
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 1f71b97..7bb4142 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
*/
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
- if (likely(slab_is_available()))
- *mask = kmalloc_node(cpumask_size(), flags, node);
- else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- printk(KERN_ERR
- "=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
- *mask = NULL;
- }
+ *mask = kmalloc_node(cpumask_size(), flags, node);
+
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (!*mask) {
printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
@@ -119,6 +112,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var_node);
+
/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -135,6 +134,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
}
EXPORT_SYMBOL(alloc_cpumask_var);
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var);
+
/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
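The new zalloc_cpumask_var() and zalloc_cpumask_var_node() wrappers above simply OR in __GFP_ZERO, so callers get a cleared mask without a separate cpumask_clear(). A hypothetical caller (all names invented for illustration) might look like this:

/* Allocate a zeroed cpumask, mark the current CPU in it, free it. */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/errno.h>

static int mark_this_cpu(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))	/* kmalloc + __GFP_ZERO */
		return -ENOMEM;

	cpumask_set_cpu(get_cpu(), mask);
	put_cpu();
	/* ... hand the mask to whatever needs it ... */

	free_cpumask_var(mask);
	return 0;
}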
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c314..e73822a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
-#ifdef CONFIG_SMP
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
-#endif
+
/* Otherwise do it the slow way */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
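With the CONFIG_SMP guard removed above, the lockless atomic_add_unless() fast path is taken on uniprocessor builds too, and the spinlock is only acquired when the count really drops to zero. A typical, hypothetical release-path caller of this helper (struct and lock names invented):

/* Only take the list lock when the final reference goes away. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

static void put_object(struct my_obj *obj)
{
	/* fast path: count was > 1, decremented atomically, no lock taken */
	if (!atomic_dec_and_lock(&obj->refcount, &obj_list_lock))
		return;

	/* slow path: last reference dropped, lock is now held */
	list_del(&obj->node);
	spin_unlock(&obj_list_lock);
	kfree(obj);
}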
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a..ad65fc0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
static u32 num_free_entries;
static u32 min_free_entries;
+static u32 nr_total_entries;
/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN 64
+
+static char current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
static const char *type2name[4] = { "single", "page",
"scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
/*
* The access to some variables in this macro is racy. We can't use atomic_t
* here because all these variables are exported to debugfs. Some of them even
@@ -121,15 +139,54 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
if (entry) {
- printk(KERN_WARNING "Mapped at:\n");
+ pr_warning("Mapped at:\n");
print_stack_trace(&entry->stacktrace, 0);
}
#endif
}
+static bool driver_filter(struct device *dev)
+{
+ struct device_driver *drv;
+ unsigned long flags;
+ bool ret;
+
+ /* driver filter off */
+ if (likely(!current_driver_name[0]))
+ return true;
+
+ /* driver filter on and initialized */
+ if (current_driver && dev->driver == current_driver)
+ return true;
+
+ if (current_driver || !current_driver_name[0])
+ return false;
+
+ /* driver filter on but not yet initialized */
+ drv = get_driver(dev->driver);
+ if (!drv)
+ return false;
+
+ /* lock to protect against change of current_driver_name */
+ read_lock_irqsave(&driver_name_lock, flags);
+
+ ret = false;
+ if (drv->name &&
+ strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+ current_driver = drv;
+ ret = true;
+ }
+
+ read_unlock_irqrestore(&driver_name_lock, flags);
+ put_driver(drv);
+
+ return ret;
+}
+
#define err_printk(dev, entry, format, arg...) do { \
error_count += 1; \
- if (show_all_errors || show_num_errors > 0) { \
+ if (driver_filter(dev) && \
+ (show_all_errors || show_num_errors > 0)) { \
WARN(1, "%s %s: " format, \
dev_driver_string(dev), \
dev_name(dev) , ## arg); \
@@ -185,15 +242,50 @@ static void put_hash_bucket(struct hash_bucket *bucket,
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
struct dma_debug_entry *ref)
{
- struct dma_debug_entry *entry;
+ struct dma_debug_entry *entry, *ret = NULL;
+ int matches = 0, match_lvl, last_lvl = 0;
list_for_each_entry(entry, &bucket->list, list) {
- if ((entry->dev_addr == ref->dev_addr) &&
- (entry->dev == ref->dev))
+ if ((entry->dev_addr != ref->dev_addr) ||
+ (entry->dev != ref->dev))
+ continue;
+
+ /*
+ * Some drivers map the same physical address multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which returns the entry from
+ * the hash which fits best to the reference value
+ * instead of the first-fit.
+ */
+ matches += 1;
+ match_lvl = 0;
+ entry->size == ref->size ? ++match_lvl : match_lvl;
+ entry->type == ref->type ? ++match_lvl : match_lvl;
+ entry->direction == ref->direction ? ++match_lvl : match_lvl;
+
+ if (match_lvl == 3) {
+ /* perfect-fit - return the result */
return entry;
+ } else if (match_lvl > last_lvl) {
+ /*
+ * We found an entry that fits better than the
+ * previous one
+ */
+ last_lvl = match_lvl;
+ ret = entry;
+ }
}
- return NULL;
+ /*
+ * If we have multiple matches but no perfect-fit, just return
+ * NULL.
+ */
+ ret = (matches == 1) ? ret : NULL;
+
+ return ret;
}
/*
@@ -257,6 +349,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
put_hash_bucket(bucket, &flags);
}
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+ struct dma_debug_entry *entry;
+
+ entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+ list_del(&entry->list);
+ memset(entry, 0, sizeof(*entry));
+
+ num_free_entries -= 1;
+ if (num_free_entries < min_free_entries)
+ min_free_entries = num_free_entries;
+
+ return entry;
+}
+
/* struct dma_entry allocator
*
* The next two functions implement the allocator for
@@ -270,15 +377,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
spin_lock_irqsave(&free_entries_lock, flags);
if (list_empty(&free_entries)) {
- printk(KERN_ERR "DMA-API: debugging out of memory "
- "- disabling\n");
+ pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
goto out;
}
- entry = list_entry(free_entries.next, struct dma_debug_entry, list);
- list_del(&entry->list);
- memset(entry, 0, sizeof(*entry));
+ entry = __dma_entry_alloc();
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +390,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
entry->stacktrace.skip = 2;
save_stack_trace(&entry->stacktrace);
#endif
- num_free_entries -= 1;
- if (num_free_entries < min_free_entries)
- min_free_entries = num_free_entries;
out:
spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +411,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
spin_unlock_irqrestore(&free_entries_lock, flags);
}
+int dma_debug_resize_entries(u32 num_entries)
+{
+ int i, delta, ret = 0;
+ unsigned long flags;
+ struct dma_debug_entry *entry;
+ LIST_HEAD(tmp);
+
+ spin_lock_irqsave(&free_entries_lock, flags);
+
+ if (nr_total_entries < num_entries) {
+ delta = num_entries - nr_total_entries;
+
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
+ for (i = 0; i < delta; i++) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ break;
+
+ list_add_tail(&entry->list, &tmp);
+ }
+
+ spin_lock_irqsave(&free_entries_lock, flags);
+
+ list_splice(&tmp, &free_entries);
+ nr_total_entries += i;
+ num_free_entries += i;
+ } else {
+ delta = nr_total_entries - num_entries;
+
+ for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+ entry = __dma_entry_alloc();
+ kfree(entry);
+ }
+
+ nr_total_entries -= i;
+ }
+
+ if (nr_total_entries != num_entries)
+ ret = 1;
+
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
/*
* DMA-API debugging init code
*
@@ -334,8 +482,7 @@ static int prealloc_memory(u32 num_entries)
num_free_entries = num_entries;
min_free_entries = num_entries;
- printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
- num_entries);
+ pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
return 0;
@@ -349,11 +496,102 @@ out_err:
return -ENOMEM;
}
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[NAME_MAX_LEN + 1];
+ unsigned long flags;
+ int len;
+
+ if (!current_driver_name[0])
+ return 0;
+
+ /*
+ * We can't copy to userspace directly because current_driver_name can
+ * only be read under the driver_name_lock with irqs disabled. So
+ * create a temporary copy first.
+ */
+ read_lock_irqsave(&driver_name_lock, flags);
+ len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+ read_unlock_irqrestore(&driver_name_lock, flags);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[NAME_MAX_LEN];
+ unsigned long flags;
+ size_t len;
+ int i;
+
+ /*
+ * We can't copy from userspace directly. Access to
+ * current_driver_name is protected with a write_lock with irqs
+ * disabled. Since copy_from_user can fault and may sleep we
+ * need to copy to temporary buffer first
+ */
+ len = min(count, (size_t)(NAME_MAX_LEN - 1));
+ if (copy_from_user(buf, userbuf, len))
+ return -EFAULT;
+
+ buf[len] = 0;
+
+ write_lock_irqsave(&driver_name_lock, flags);
+
+ /*
+ * Now handle the string we got from userspace very carefully.
+ * The rules are:
+ * - only use the first token we got
+ * - token delimiter is everything looking like a space
+ * character (' ', '\n', '\t' ...)
+ *
+ */
+ if (!isalnum(buf[0])) {
+ /*
+ * If the first character userspace gave us is not
+ * alphanumerical then assume the filter should be
+ * switched off.
+ */
+ if (current_driver_name[0])
+ pr_info("DMA-API: switching off dma-debug driver filter\n");
+ current_driver_name[0] = 0;
+ current_driver = NULL;
+ goto out_unlock;
+ }
+
+ /*
+ * Now parse out the first token and use it as the name for the
+ * driver to filter for.
+ */
+ for (i = 0; i < NAME_MAX_LEN; ++i) {
+ current_driver_name[i] = buf[i];
+ if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+ break;
+ }
+ current_driver_name[i] = 0;
+ current_driver = NULL;
+
+ pr_info("DMA-API: enable driver filter for driver [%s]\n",
+ current_driver_name);
+
+out_unlock:
+ write_unlock_irqrestore(&driver_name_lock, flags);
+
+ return count;
+}
+
+const struct file_operations filter_fops = {
+ .read = filter_read,
+ .write = filter_write,
+};
+
static int dma_debug_fs_init(void)
{
dma_debug_dent = debugfs_create_dir("dma-api", NULL);
if (!dma_debug_dent) {
- printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+ pr_err("DMA-API: can not create debugfs directory\n");
return -ENOMEM;
}
@@ -392,6 +630,11 @@ static int dma_debug_fs_init(void)
if (!min_free_entries_dent)
goto out_err;
+ filter_dent = debugfs_create_file("driver_filter", 0644,
+ dma_debug_dent, NULL, &filter_fops);
+ if (!filter_dent)
+ goto out_err;
+
return 0;
out_err:
@@ -400,9 +643,64 @@ out_err:
return -ENOMEM;
}
+static int device_dma_allocations(struct device *dev)
+{
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+ int count = 0, i;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < HASH_SIZE; ++i) {
+ spin_lock(&dma_entry_hash[i].lock);
+ list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+ if (entry->dev == dev)
+ count += 1;
+ }
+ spin_unlock(&dma_entry_hash[i].lock);
+ }
+
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ int count;
+
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ count = device_dma_allocations(dev);
+ if (count == 0)
+ break;
+ err_printk(dev, NULL, "DMA-API: device driver has pending "
+ "DMA allocations while released from device "
+ "[count=%d]\n", count);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
void dma_debug_add_bus(struct bus_type *bus)
{
- /* FIXME: register notifier */
+ struct notifier_block *nb;
+
+ nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ if (nb == NULL) {
+ pr_err("dma_debug_add_bus: out of memory\n");
+ return;
+ }
+
+ nb->notifier_call = dma_debug_device_change;
+
+ bus_register_notifier(bus, nb);
}
/*
@@ -421,8 +719,7 @@ void dma_debug_init(u32 num_entries)
}
if (dma_debug_fs_init() != 0) {
- printk(KERN_ERR "DMA-API: error creating debugfs entries "
- "- disabling\n");
+ pr_err("DMA-API: error creating debugfs entries - disabling\n");
global_disable = true;
return;
@@ -432,14 +729,15 @@ void dma_debug_init(u32 num_entries)
num_entries = req_entries;
if (prealloc_memory(num_entries) != 0) {
- printk(KERN_ERR "DMA-API: debugging out of memory error "
- "- disabled\n");
+ pr_err("DMA-API: debugging out of memory error - disabled\n");
global_disable = true;
return;
}
- printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+ nr_total_entries = num_free_entries;
+
+ pr_info("DMA-API: debugging enabled by kernel config\n");
}
static __init int dma_debug_cmdline(char *str)
@@ -448,8 +746,7 @@ static __init int dma_debug_cmdline(char *str)
return -EINVAL;
if (strncmp(str, "off", 3) == 0) {
- printk(KERN_INFO "DMA-API: debugging disabled on kernel "
- "command line\n");
+ pr_info("DMA-API: debugging disabled on kernel command line\n");
global_disable = true;
}
@@ -723,15 +1020,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->type = dma_debug_sg;
entry->dev = dev;
entry->paddr = sg_phys(s);
- entry->size = s->length;
- entry->dev_addr = s->dma_address;
+ entry->size = sg_dma_len(s);
+ entry->dev_addr = sg_dma_address(s);
entry->direction = direction;
entry->sg_call_ents = nents;
entry->sg_mapped_ents = mapped_ents;
if (!PageHighMem(sg_page(s))) {
check_for_stack(dev, sg_virt(s));
- check_for_illegal_area(dev, sg_virt(s), s->length);
+ check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
}
add_dma_entry(entry);
@@ -739,13 +1036,33 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
}
EXPORT_SYMBOL(debug_dma_map_sg);
+static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+{
+ struct dma_debug_entry *entry, ref;
+ struct hash_bucket *bucket;
+ unsigned long flags;
+ int mapped_ents;
+
+ ref.dev = dev;
+ ref.dev_addr = sg_dma_address(s);
+ ref.size = sg_dma_len(s),
+
+ bucket = get_hash_bucket(&ref, &flags);
+ entry = hash_bucket_find(bucket, &ref);
+ mapped_ents = 0;
+
+ if (entry)
+ mapped_ents = entry->sg_mapped_ents;
+ put_hash_bucket(bucket, &flags);
+
+ return mapped_ents;
+}
+
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
{
- struct dma_debug_entry *entry;
struct scatterlist *s;
int mapped_ents = 0, i;
- unsigned long flags;
if (unlikely(global_disable))
return;
@@ -756,8 +1073,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
.type = dma_debug_sg,
.dev = dev,
.paddr = sg_phys(s),
- .dev_addr = s->dma_address,
- .size = s->length,
+ .dev_addr = sg_dma_address(s),
+ .size = sg_dma_len(s),
.direction = dir,
.sg_call_ents = 0,
};
@@ -765,14 +1082,9 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (mapped_ents && i >= mapped_ents)
break;
- if (mapped_ents == 0) {
- struct hash_bucket *bucket;
+ if (!i) {
ref.sg_call_ents = nelems;
- bucket = get_hash_bucket(&ref, &flags);
- entry = hash_bucket_find(bucket, &ref);
- if (entry)
- mapped_ents = entry->sg_mapped_ents;
- put_hash_bucket(bucket, &flags);
+ mapped_ents = get_nr_mapped_entries(dev, s);
}
check_unmap(&ref);
@@ -874,14 +1186,20 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
{
struct scatterlist *s;
- int i;
+ int mapped_ents = 0, i;
if (unlikely(global_disable))
return;
for_each_sg(sg, s, nelems, i) {
- check_sync(dev, s->dma_address, s->dma_length, 0,
- direction, true);
+ if (!i)
+ mapped_ents = get_nr_mapped_entries(dev, s);
+
+ if (i >= mapped_ents)
+ break;
+
+ check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+ direction, true);
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -890,15 +1208,39 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
{
struct scatterlist *s;
- int i;
+ int mapped_ents = 0, i;
if (unlikely(global_disable))
return;
for_each_sg(sg, s, nelems, i) {
- check_sync(dev, s->dma_address, s->dma_length, 0,
- direction, false);
+ if (!i)
+ mapped_ents = get_nr_mapped_entries(dev, s);
+
+ if (i >= mapped_ents)
+ break;
+
+ check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+ direction, false);
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
+static int __init dma_debug_driver_setup(char *str)
+{
+ int i;
+
+ for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+ current_driver_name[i] = *str;
+ if (*str == 0)
+ break;
+ }
+
+ if (current_driver_name[0])
+ pr_info("DMA-API: enable driver filter for driver [%s]\n",
+ current_driver_name);
+
+
+ return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
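Two behavioural additions above are worth restating: hash_bucket_find() no longer returns the first dev/dev_addr match but scores candidates on size, type and direction (best-fit), and error reports can be restricted to one driver via the dma_debug_driver= boot parameter or the new debugfs "driver_filter" file. The following is a condensed sketch of just the scoring rule, not the kernel function itself; it assumes the struct dma_debug_entry layout used in this file:

/* Best-fit scoring as applied by hash_bucket_find(): a candidate must
 * match dev and dev_addr, is then scored 0..3 on size/type/direction,
 * a perfect 3 wins immediately, and ambiguous partial matches make the
 * lookup return NULL. */
static int match_score(const struct dma_debug_entry *e,
		       const struct dma_debug_entry *ref)
{
	int score = 0;

	if (e->dev != ref->dev || e->dev_addr != ref->dev_addr)
		return -1;		/* not a candidate at all */
	if (e->size == ref->size)
		score++;
	if (e->type == ref->type)
		score++;
	if (e->direction == ref->direction)
		score++;
	return score;			/* 3 == perfect fit */
}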
diff --git a/lib/extable.c b/lib/extable.c
index 179c087..4cac81e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex, NULL);
}
-#endif
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /*trim the beginning*/
+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /*trim the end*/
+ while (m->num_exentries &&
+ within_module_init(m->extable[m->num_exentries-1].insn, m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+#endif /* !ARCH_HAS_SORT_EXTABLE */
#ifndef ARCH_HAS_SEARCH_EXTABLE
/*
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276d..eed2bdb 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
int bit, end_bit;
- write_lock(&pool->lock);
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db..39af256 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%16.16llx ", (unsigned long long)*(ptr8 + j));
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%8.8x ", *(ptr4 + j));
+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%4.4x ", *(ptr2 + j));
+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
default:
- for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
- j++) {
+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
+ if (j)
+ lx--;
+
ascii_column = 3 * rowsize + 2;
break;
}
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
- for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
: '.';
nil:
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe..b512b74 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
struct kobject *parent_kobj)
{
struct kset *kset;
+ int retval;
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
if (!kset)
return NULL;
- kobject_set_name(&kset->kobj, name);
+ retval = kobject_set_name(&kset->kobj, name);
+ if (retval) {
+ kfree(kset);
+ return NULL;
+ }
kset->uevent_ops = uevent_ops;
kset->kobj.parent = parent_kobj;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0..23abbd9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_insert);
-/**
- * radix_tree_lookup_slot - lookup a slot in a radix tree
- * @root: radix tree root
- * @index: index key
- *
- * Returns: the slot corresponding to the position @index in the
- * radix tree @root. This is useful for update-if-exists operations.
- *
- * This function can be called under rcu_read_lock iff the slot is not
- * modified by radix_tree_replace_slot, otherwise it must be called
- * exclusive from other writers. Any dereference of the slot must be done
- * using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
*/
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+ unsigned long index, int is_slot)
{
unsigned int height, shift;
struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
if (!radix_tree_is_indirect_ptr(node)) {
if (index > 0)
return NULL;
- return (void **)&root->rnode;
+ return is_slot ? (void *)&root->rnode : node;
}
node = radix_tree_indirect_to_ptr(node);
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
height--;
} while (height > 0);
- return (void **)slot;
+ return is_slot ? (void *)slot:node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+ return (void **)radix_tree_lookup_element(root, index, 1);
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
*/
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
- unsigned int height, shift;
- struct radix_tree_node *node, **slot;
-
- node = rcu_dereference(root->rnode);
- if (node == NULL)
- return NULL;
-
- if (!radix_tree_is_indirect_ptr(node)) {
- if (index > 0)
- return NULL;
- return node;
- }
- node = radix_tree_indirect_to_ptr(node);
-
- height = node->height;
- if (index > radix_tree_maxindex(height))
- return NULL;
-
- shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
- do {
- slot = (struct radix_tree_node **)
- (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = rcu_dereference(*slot);
- if (node == NULL)
- return NULL;
-
- shift -= RADIX_TREE_MAP_SHIFT;
- height--;
- } while (height > 0);
-
- return node;
+ return radix_tree_lookup_element(root, index, 0);
}
EXPORT_SYMBOL(radix_tree_lookup);
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_next_hole);
+/**
+ * radix_tree_prev_hole - find the prev hole (not-present entry)
+ * @root: tree root
+ * @index: index key
+ * @max_scan: maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index]
+ * for the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >= max_scan'
+ * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ * radix_tree_next_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 10, then subsequently a hole is created at index 5,
+ * radix_tree_prev_hole covering both indexes may return 5 if called under
+ * rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan)
+{
+ unsigned long i;
+
+ for (i = 0; i < max_scan; i++) {
+ if (!radix_tree_lookup(root, index))
+ break;
+ index--;
+ if (index == LONG_MAX)
+ break;
+ }
+
+ return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
static unsigned int
__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index)
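After the change above, radix_tree_lookup() and radix_tree_lookup_slot() share one walker, radix_tree_lookup_element(), and differ only in whether the slot or the item is returned. The slot variant exists for update-if-exists patterns; a hypothetical caller (tree, lock and item names invented, with the caller's own lock held because the slot is modified) could look like:

/* Replace an item only if the index is already present. */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

static void replace_if_present(unsigned long index, void *new_item)
{
	void **slot;

	spin_lock(&my_tree_lock);
	slot = radix_tree_lookup_slot(&my_tree, index);
	if (slot)
		radix_tree_replace_slot(slot, new_item);
	spin_unlock(&my_tree_lock);
}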
diff --git a/lib/rational.c b/lib/rational.c
new file mode 100644
index 0000000..b3c099b
--- /dev/null
+++ b/lib/rational.c
@@ -0,0 +1,62 @@
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
+ *
+ * helper functions when coping with rational numbers
+ */
+
+#include <linux/rational.h>
+
+/*
+ * calculate best rational approximation for a given fraction
+ * taking into account restricted register size, e.g. to find
+ * appropriate values for a pll with 5 bit denominator and
+ * 8 bit numerator register fields, trying to set up with a
+ * frequency ratio of 3.1415, one would say:
+ *
+ * rational_best_approximation(31415, 10000,
+ * (1 << 8) - 1, (1 << 5) - 1, &n, &d);
+ *
+ * you may look at given_numerator as a fixed point number,
+ * with the fractional part size described in given_denominator.
+ *
+ * for theoretical background, see:
+ * http://en.wikipedia.org/wiki/Continued_fraction
+ */
+
+void rational_best_approximation(
+ unsigned long given_numerator, unsigned long given_denominator,
+ unsigned long max_numerator, unsigned long max_denominator,
+ unsigned long *best_numerator, unsigned long *best_denominator)
+{
+ unsigned long n, d, n0, d0, n1, d1;
+ n = given_numerator;
+ d = given_denominator;
+ n0 = d1 = 0;
+ n1 = d0 = 1;
+ for (;;) {
+ unsigned long t, a;
+ if ((n1 > max_numerator) || (d1 > max_denominator)) {
+ n1 = n0;
+ d1 = d0;
+ break;
+ }
+ if (d == 0)
+ break;
+ t = d;
+ a = n / d;
+ d = n % d;
+ n = t;
+ t = n0 + a * n1;
+ n0 = n1;
+ n1 = t;
+ t = d0 + a * d1;
+ d0 = d1;
+ d1 = t;
+ }
+ *best_numerator = n1;
+ *best_denominator = d1;
+}
+
+EXPORT_SYMBOL(rational_best_approximation);
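Tracing the loop above by hand for the example given in the header comment: approximating 31415/10000 with an 8-bit numerator cap and a 5-bit denominator cap produces the convergents 3/1, 22/7 and 311/99; 311/99 exceeds both caps, so the previous convergent is kept. A sketch of that call (pll_example is an invented name, not part of this commit):

/* Worked example of the continued-fraction walk above. */
#include <linux/rational.h>

static void pll_example(void)
{
	unsigned long n, d;

	rational_best_approximation(31415, 10000,
				    (1 << 8) - 1, (1 << 5) - 1, &n, &d);
	/* per the hand trace above, n == 22 and d == 7 here */
}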
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659..e2aa3be 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
node = node->rb_right;
while ((left = node->rb_left) != NULL)
node = left;
+
+ if (rb_parent(old)) {
+ if (rb_parent(old)->rb_left == old)
+ rb_parent(old)->rb_left = node;
+ else
+ rb_parent(old)->rb_right = node;
+ } else
+ root->rb_node = node;
+
child = node->rb_right;
parent = rb_parent(node);
color = rb_color(node);
- if (child)
- rb_set_parent(child, parent);
if (parent == old) {
- parent->rb_right = child;
parent = node;
- } else
+ } else {
+ if (child)
+ rb_set_parent(child, parent);
parent->rb_left = child;
+ node->rb_right = old->rb_right;
+ rb_set_parent(old->rb_right, node);
+ }
+
node->rb_parent_color = old->rb_parent_color;
- node->rb_right = old->rb_right;
node->rb_left = old->rb_left;
-
- if (rb_parent(old))
- {
- if (rb_parent(old)->rb_left == old)
- rb_parent(old)->rb_left = node;
- else
- rb_parent(old)->rb_right = node;
- } else
- root->rb_node = node;
-
rb_set_parent(old->rb_left, node);
- if (old->rb_right)
- rb_set_parent(old->rb_right, node);
+
goto color;
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7..bffe6d7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
int swiotlb_force;
/*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
return paddr;
}
-phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
return baddr;
}
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}
-static void *swiotlb_bus_to_virt(dma_addr_t address)
+void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
- return phys_to_virt(swiotlb_bus_to_phys(address));
+ return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
+}
+
+int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
+ dma_addr_t addr, size_t size)
+{
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -309,10 +315,10 @@ cleanup1:
return -ENOMEM;
}
-static int
+static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+ return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}
static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
unsigned long flags;
while (size) {
- sz = min(PAGE_SIZE - offset, size);
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn),
@@ -476,7 +482,7 @@ found:
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
size)) {
/*
* The allocated memory isn't reachable by the device.
- * Fall back on swiotlb_map_single().
*/
free_pages((unsigned long) ret, order);
ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (!ret) {
/*
* We are either out of memory or the device can't DMA
- * to GFP_DMA memory; fall back on
- * swiotlb_map_single(), which will grab memory from
- * the lowest available address range.
+ * to GFP_DMA memory; fall back on map_single(), which
+ * will grab memory from the lowest available address range.
*/
ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+ do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
return NULL;
}
*dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+ do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
*/
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
- void *ptr = page_address(page) + offset;
dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
void *map;
BUG_ON(dir == DMA_NONE);
/*
- * If the pointer passed in happens to be in the device's DMA window,
+ * If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (!address_needs_mapping(dev, dev_addr, size) &&
- !range_needs_mapping(virt_to_phys(ptr), size))
+ !range_needs_mapping(phys, size))
return dev_addr;
/*
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call. All
+ * match what was provided for in a previous swiotlb_map_page call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+
+ if (is_swiotlb_buffer(dma_addr)) {
+ do_unmap_single(hwdev, dma_addr, size, dir);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ dma_mark_clean(dma_addr, size);
+}
+
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr);
-
- BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
- unmap_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
* using the cpu, yet do not wish to teardown the dma mapping, you must
* call this function before doing so. At the next point you give the dma
* address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr);
+ char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
+
+ if (is_swiotlb_buffer(dma_addr)) {
sync_single(hwdev, dma_addr, size, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ dma_mark_clean(dma_addr, size);
}
void
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
-
- BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
- sync_single(hwdev, dma_addr, size, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}
void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
* interface. Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
* same here.
*/
int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
- for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
- unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
- sg->dma_length, dir);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
- }
+ for_each_sg(sgl, sg, nelems, i)
+ unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
struct scatterlist *sg;
int i;
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
- sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
+ for_each_sg(sgl, sg, nelems, i)
+ swiotlb_sync_single(hwdev, sg->dma_address,
sg->dma_length, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
- }
}
void
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7536ace..756ccaf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -408,6 +408,8 @@ enum format_type {
FORMAT_TYPE_LONG_LONG,
FORMAT_TYPE_ULONG,
FORMAT_TYPE_LONG,
+ FORMAT_TYPE_UBYTE,
+ FORMAT_TYPE_BYTE,
FORMAT_TYPE_USHORT,
FORMAT_TYPE_SHORT,
FORMAT_TYPE_UINT,
@@ -573,12 +575,15 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
}
static char *symbol_string(char *buf, char *end, void *ptr,
- struct printf_spec spec)
+ struct printf_spec spec, char ext)
{
unsigned long value = (unsigned long) ptr;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
- sprint_symbol(sym, value);
+ if (ext != 'f')
+ sprint_symbol(sym, value);
+ else
+ kallsyms_lookup(value, NULL, NULL, NULL, sym);
return string(buf, end, sym, spec);
#else
spec.field_width = 2*sizeof(void *);
@@ -690,7 +695,8 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
*
* Right now we handle:
*
- * - 'F' For symbolic function descriptor pointers
+ * - 'F' For symbolic function descriptor pointers with offset
+ * - 'f' For simple symbolic function names without offset
* - 'S' For symbolic direct pointers
* - 'R' For a struct resource pointer, it prints the range of
* addresses (not the name nor the flags)
@@ -713,10 +719,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
switch (*fmt) {
case 'F':
+ case 'f':
ptr = dereference_function_descriptor(ptr);
/* Fallthrough */
case 'S':
- return symbol_string(buf, end, ptr, spec);
+ return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
return resource_string(buf, end, ptr, spec);
case 'm':
@@ -853,11 +860,15 @@ qualifier:
spec->qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
- spec->qualifier = *fmt;
- ++fmt;
- if (spec->qualifier == 'l' && *fmt == 'l') {
- spec->qualifier = 'L';
- ++fmt;
+ spec->qualifier = *fmt++;
+ if (unlikely(spec->qualifier == *fmt)) {
+ if (spec->qualifier == 'l') {
+ spec->qualifier = 'L';
+ ++fmt;
+ } else if (spec->qualifier == 'h') {
+ spec->qualifier = 'H';
+ ++fmt;
+ }
}
}
@@ -919,6 +930,11 @@ qualifier:
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
+ } else if (spec->qualifier == 'H') {
+ if (spec->flags & SIGN)
+ spec->type = FORMAT_TYPE_BYTE;
+ else
+ spec->type = FORMAT_TYPE_UBYTE;
} else if (spec->qualifier == 'h') {
if (spec->flags & SIGN)
spec->type = FORMAT_TYPE_SHORT;
@@ -943,7 +959,8 @@ qualifier:
*
* This function follows C99 vsnprintf, but has some extensions:
* %pS output the name of a text symbol
- * %pF output the name of a function pointer
+ * %pF output the name of a function pointer with its offset
+ * %pf output the name of a function pointer without its offset
* %pR output the address range in a struct resource
*
* The return value is the number of characters which would
@@ -1087,6 +1104,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
case FORMAT_TYPE_PTRDIFF:
num = va_arg(args, ptrdiff_t);
break;
+ case FORMAT_TYPE_UBYTE:
+ num = (unsigned char) va_arg(args, int);
+ break;
+ case FORMAT_TYPE_BYTE:
+ num = (signed char) va_arg(args, int);
+ break;
case FORMAT_TYPE_USHORT:
num = (unsigned short) va_arg(args, int);
break;
@@ -1363,6 +1386,10 @@ do { \
case FORMAT_TYPE_PTRDIFF:
save_arg(ptrdiff_t);
break;
+ case FORMAT_TYPE_UBYTE:
+ case FORMAT_TYPE_BYTE:
+ save_arg(char);
+ break;
case FORMAT_TYPE_USHORT:
case FORMAT_TYPE_SHORT:
save_arg(short);
@@ -1391,7 +1418,8 @@ EXPORT_SYMBOL_GPL(vbin_printf);
*
* The format follows C99 vsnprintf, but has some extensions:
* %pS output the name of a text symbol
- * %pF output the name of a function pointer
+ * %pF output the name of a function pointer with its offset
+ * %pf output the name of a function pointer without its offset
* %pR output the address range in a struct resource
* %n is ignored
*
@@ -1538,6 +1566,12 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case FORMAT_TYPE_PTRDIFF:
num = get_arg(ptrdiff_t);
break;
+ case FORMAT_TYPE_UBYTE:
+ num = get_arg(unsigned char);
+ break;
+ case FORMAT_TYPE_BYTE:
+ num = get_arg(signed char);
+ break;
case FORMAT_TYPE_USHORT:
num = get_arg(unsigned short);
break;
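The vsprintf changes above add the '%pf' pointer extension (symbol name without the offset that '%pF' prints) and an internal 'H' qualifier so that 'hh' length modifiers map to the new FORMAT_TYPE_BYTE/FORMAT_TYPE_UBYTE cases. Hypothetical callers exercising both (fmt_examples is an invented name):

/* Exercise the new %pf extension and the hh length modifier. */
#include <linux/kernel.h>

static void fmt_examples(void)
{
	signed char sc = -5;
	unsigned char uc = 200;

	pr_info("with offset:    %pF\n", fmt_examples); /* name+offset/size */
	pr_info("without offset: %pf\n", fmt_examples); /* just the name */

	/* 'hh' now selects the byte/ubyte format types */
	pr_info("byte: %hhd, ubyte: %hhu\n", sc, uc);
}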