author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-04-19 17:17:34 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-04-19 17:17:34 +0100
commit     cf816ecb533ab96b883dfdc0db174598b5b5c4d2 (patch)
tree       1b7705db288ae2917105e624b01fdf81e0882bf1 /lib
parent     adf6d34e460387ee3e8f1e1875d52bff51212c7d (diff)
parent     15f7d677ccff6f0f5de8a1ee43a792567e9f9de9 (diff)
Merge branch 'merge-fixes' into devel
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          27
-rw-r--r--  lib/Kconfig.kgdb           58
-rw-r--r--  lib/Makefile                2
-rw-r--r--  lib/kernel_lock.c           1
-rw-r--r--  lib/kobject_uevent.c        6
-rw-r--r--  lib/pcounter.c             58
-rw-r--r--  lib/scatterlist.c         102
-rw-r--r--  lib/semaphore-sleepers.c  176
8 files changed, 180 insertions, 250 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a..95de310 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
default n
bool "Enable SLUB performance statistics"
- depends on SLUB
+ depends on SLUB && SLUB_DEBUG && SYSFS
help
SLUB statistics are useful to debug SLUB's allocation behavior in
order to find ways to optimize the allocator. This should never be
@@ -265,16 +265,6 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config DEBUG_SEMAPHORE
- bool "Semaphore debugging"
- depends on DEBUG_KERNEL
- depends on ALPHA || FRV
- default n
- help
- If you say Y here then semaphore processing will issue lots of
- verbose debugging messages. If you suspect a semaphore problem or a
- kernel hacker asks for this option then say Y. Otherwise say N.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -593,7 +583,7 @@ config LATENCYTOP
to find out which userspace is blocking on what kernel operations.
config PROVIDE_OHCI1394_DMA_INIT
- bool "Provide code for enabling DMA over FireWire early on boot"
+ bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
help
If you want to debug problems which hang or crash the kernel early
@@ -621,4 +611,17 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
+config FIREWIRE_OHCI_REMOTE_DMA
+ bool "Remote debugging over FireWire with firewire-ohci"
+ depends on FIREWIRE_OHCI
+ help
+ This option lets you use the FireWire bus for remote debugging
+ with the help of the firewire-ohci driver. It enables unfiltered
+ remote DMA in firewire-ohci.
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
+ If unsure, say N.
+
source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 0000000..f2e01ac
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+ bool "KGDB: kernel debugging with remote gdb"
+ select FRAME_POINTER
+ depends on HAVE_ARCH_KGDB
+ depends on DEBUG_KERNEL && EXPERIMENTAL
+ help
+ If you say Y here, it will be possible to remotely debug the
+ kernel using gdb. Documentation of the kernel debugger is available
+ at http://kgdb.sourceforge.net as well as in DocBook form
+ in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+ bool
+
+config HAVE_ARCH_KGDB
+ bool
+
+config KGDB_SERIAL_CONSOLE
+ tristate "KGDB: use kgdb over the serial console"
+ depends on KGDB
+ select CONSOLE_POLL
+ select MAGIC_SYSRQ
+ default y
+ help
+ Share a serial console with kgdb. Sysrq-g must be used
+ to break in initially.
+
+config KGDB_TESTS
+ bool "KGDB: internal test suite"
+ depends on KGDB
+ default n
+ help
+ This is a kgdb I/O module specifically designed to test
+ kgdb's internal functions. This kgdb I/O module is
+ intended for the development of new kgdb stubs
+ as well as for regression testing of the kgdb internals.
+ See drivers/misc/kgdbts.c for details about
+ the tests. The most basic use of this I/O module is to boot
+ a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+ bool "KGDB: Run tests on boot"
+ depends on KGDB_TESTS
+ default n
+ help
+ Run the kgdb tests automatically on boot, without the need
+ to pass in a kernel parameter.
+
+config KGDB_TESTS_BOOT_STRING
+ string "KGDB: which internal kgdb tests to run"
+ depends on KGDB_TESTS_ON_BOOT
+ default "V1F100"
+ help
+ This is the command string sent to the kgdb test suite on
+ boot. See drivers/misc/kgdbts.c for detailed
+ information about other strings you could use beyond the
+ default of V1F100.
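
The help text above refers to "kgdb I/O modules". For orientation, below is a minimal sketch of what such a module looks like, assuming the struct kgdb_io / kgdb_register_io_module() interface that <linux/kgdb.h> exposed in this era (field names are from memory and may not match every tree exactly; drivers/misc/kgdbts.c is the authoritative example, and "demo_kgdbio" is a hypothetical name):

#include <linux/kgdb.h>
#include <linux/module.h>

/* Skeletal kgdb I/O module: the kgdb core calls read_char()/write_char()
 * to exchange gdb remote-protocol bytes over whatever transport the
 * module wraps (a UART for KGDB_SERIAL_CONSOLE, a test harness for
 * KGDB_TESTS). */
static int demo_read_char(void)
{
	return -1;	/* no transport behind this sketch: nothing to read */
}

static void demo_write_char(u8 c)
{
	/* a real module would push 'c' out its transport here */
}

static struct kgdb_io demo_io_ops = {
	.name		= "demo_kgdbio",
	.read_char	= demo_read_char,
	.write_char	= demo_write_char,
};

static int __init demo_kgdbio_init(void)
{
	return kgdb_register_io_module(&demo_io_ops);
}
module_init(demo_kgdbio_init);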
diff --git a/lib/Makefile b/lib/Makefile
index 23de261..4d7649c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
@@ -61,7 +60,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf0..fbc11a3 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <asm/semaphore.h>
/*
* The 'big kernel semaphore'
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5b6d7f6..9fb6b86 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,11 +15,13 @@
*/
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
#include <net/sock.h>
diff --git a/lib/pcounter.c b/lib/pcounter.c
deleted file mode 100644
index 9b56807..0000000
--- a/lib/pcounter.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Define default pcounter functions
- * Note that often used pcounters use dedicated functions to get a speed increase.
- * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
- */
-
-#include <linux/module.h>
-#include <linux/pcounter.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-
-static void pcounter_dyn_add(struct pcounter *self, int inc)
-{
- per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
-}
-
-static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
-{
- return per_cpu_ptr(self->per_cpu_values, cpu)[0];
-}
-
-int pcounter_getval(const struct pcounter *self)
-{
- int res = 0, cpu;
-
- for_each_possible_cpu(cpu)
- res += self->getval(self, cpu);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(pcounter_getval);
-
-int pcounter_alloc(struct pcounter *self)
-{
- int rc = 0;
- if (self->add == NULL) {
- self->per_cpu_values = alloc_percpu(int);
- if (self->per_cpu_values != NULL) {
- self->add = pcounter_dyn_add;
- self->getval = pcounter_dyn_getval;
- } else
- rc = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(pcounter_alloc);
-
-void pcounter_free(struct pcounter *self)
-{
- if (self->per_cpu_values != NULL) {
- free_percpu(self->per_cpu_values);
- self->per_cpu_values = NULL;
- self->getval = NULL;
- self->add = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(pcounter_free);
-
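
Since the whole file is gone, a reconstruction of how this API was used may help. The sketch below is based only on the functions visible above: pcounter_alloc() installs the pcounter_dyn_add()/pcounter_dyn_getval() hooks, updates go through the ->add hook, and pcounter_getval() sums across all possible CPUs. The linux/pcounter.h wrappers are not shown in this diff, and percpu_counter (still built in the Makefile hunk above) is the surviving equivalent:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pcounter.h>	/* removed along with this file */

static struct pcounter demo_counter;

static int demo_pcounter_use(void)
{
	/* allocates the per-CPU ints and installs the hooks;
	 * returns non-zero on allocation failure */
	if (pcounter_alloc(&demo_counter))
		return -ENOMEM;

	/* per-CPU increment through the hook set up above */
	demo_counter.add(&demo_counter, 1);

	/* sum the per-CPU values across all possible CPUs */
	pr_info("total: %d\n", pcounter_getval(&demo_counter));

	pcounter_free(&demo_counter);
	return 0;
}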
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901..b80c211 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/highmem.h>
/**
* sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: The linear buffer to copy to or from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non-zero == from an SG list to a
+ * buffer, 0 == from a buffer to an SG list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, int to_buffer)
+{
+ struct scatterlist *sg;
+ size_t buf_off = 0;
+ int i;
+
+ WARN_ON(!irqs_disabled());
+
+ for_each_sg(sgl, sg, nents, i) {
+ struct page *page;
+ int n = 0;
+ unsigned int sg_off = sg->offset;
+ unsigned int sg_copy = sg->length;
+
+ if (sg_copy > buflen)
+ sg_copy = buflen;
+ buflen -= sg_copy;
+
+ while (sg_copy > 0) {
+ unsigned int page_copy;
+ void *p;
+
+ page_copy = PAGE_SIZE - sg_off;
+ if (page_copy > sg_copy)
+ page_copy = sg_copy;
+
+ page = nth_page(sg_page(sg), n);
+ p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+ if (to_buffer)
+ memcpy(buf + buf_off, p + sg_off, page_copy);
+ else {
+ memcpy(p + sg_off, buf + buf_off, page_copy);
+ flush_kernel_dcache_page(page);
+ }
+
+ kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+ buf_off += page_copy;
+ sg_off += page_copy;
+ if (sg_off == PAGE_SIZE) {
+ sg_off = 0;
+ n++;
+ }
+ sg_copy -= page_copy;
+ }
+
+ if (!buflen)
+ break;
+ }
+
+ return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
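
A usage sketch for the new helpers, with a hypothetical caller; sg_init_table() and sg_set_page() are the existing scatterlist setup API. Note the WARN_ON(!irqs_disabled()) in sg_copy_buffer(): because the copy runs under kmap_atomic(..., KM_BIO_SRC_IRQ), callers are expected to have interrupts disabled:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Gather a two-page SG list into a linear buffer.  Returns the number
 * of bytes copied, which may be less than buflen if the SG list is
 * shorter.  Must be called with interrupts disabled (see the WARN_ON
 * in sg_copy_buffer() above). */
static size_t demo_gather(struct page *p1, struct page *p2,
			  void *buf, size_t buflen)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_page(&sg[0], p1, PAGE_SIZE, 0);
	sg_set_page(&sg[1], p2, PAGE_SIZE, 0);

	return sg_copy_to_buffer(sg, 2, buf, buflen);
}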
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782..0000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * wait_queue_head. The "-1" is because we're
- * still hoping to get the semaphore.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock in the
- * wait_queue_head.
- */
- if (!atomic_add_negative(sleepers, &sem->count)) {
- wake_up_locked(&sem->wait);
- }
-
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- return 1;
-}
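
This file could be deleted because the kernel had just gained a single generic semaphore implementation (kernel/semaphore.c), which replaces the lock-free sleeper accounting above with a plain spinlock-protected count. Below is a condensed, illustrative sketch of that scheme, not the verbatim kernel code (the real implementation keeps its own wait list of semaphore_waiter structs rather than a wait_queue_head_t):

#include <linux/spinlock.h>
#include <linux/wait.h>

/* fields assumed initialised elsewhere with spin_lock_init() and
 * init_waitqueue_head() */
struct toy_sem {
	spinlock_t		lock;
	unsigned int		count;
	wait_queue_head_t	wait;
};

static void toy_down(struct toy_sem *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	while (sem->count == 0) {
		/* drop the lock, sleep until an up() raises the count,
		 * then retake the lock and re-check */
		spin_unlock_irqrestore(&sem->lock, flags);
		wait_event(sem->wait, sem->count > 0);
		spin_lock_irqsave(&sem->lock, flags);
	}
	sem->count--;
	spin_unlock_irqrestore(&sem->lock, flags);
}

static void toy_up(struct toy_sem *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	sem->count++;
	spin_unlock_irqrestore(&sem->lock, flags);
	wake_up(&sem->wait);
}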