path: root/lib
Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore       6
-rw-r--r--  lib/dec_and_lock.c  35
-rw-r--r--  lib/radix-tree.c     2
-rw-r--r--  lib/swiotlb.c        4
-rw-r--r--  lib/ts_bm.c          2
-rw-r--r--  lib/ts_fsm.c         2
-rw-r--r--  lib/ts_kmp.c         2
7 files changed, 47 insertions(+), 6 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
new file mode 100644
index 0000000..3bef1ea
--- /dev/null
+++ b/lib/.gitignore
@@ -0,0 +1,6 @@
+#
+# Generated files
+#
+gen_crc32table
+crc32table.h
+
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 2377af0..305a966 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,7 +1,41 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
+#include <asm/system.h>
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	int counter;
+	int newcount;
+
+	for (;;) {
+		counter = atomic_read(atomic);
+		newcount = counter - 1;
+		if (!newcount)
+			break;		/* do it the slow way */
+
+		newcount = cmpxchg(&atomic->counter, counter, newcount);
+		if (newcount == counter)
+			return 0;
+	}
+
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+#else
/*
* This is an architecture-neutral, but slow,
* implementation of the notion of "decrement
@@ -33,5 +67,6 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	spin_unlock(lock);
	return 0;
 }
+#endif
EXPORT_SYMBOL(_atomic_dec_and_lock);
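
The fast path added above decrements with cmpxchg() and only falls back to the spinlock when the count could reach zero. As a rough userspace analogue of the same pattern (a sketch only: C11 atomics and a pthread mutex stand in for the kernel's cmpxchg()/spinlock_t, and dec_and_lock_demo is a made-up name, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative analogue of _atomic_dec_and_lock(): decrement a
 * reference count; return true with the mutex held only when the
 * count reached zero. */
static bool dec_and_lock_demo(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	/* Fast path: while the count cannot reach zero, try a lock-free
	 * compare-and-swap decrement, mirroring the cmpxchg loop above. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;	/* decremented, zero not reached */
		/* CAS failed: 'old' now holds the current value; retry. */
	}

	/* Slow path: this decrement may hit zero, so serialize under the
	 * lock exactly as the kernel version does with spin_lock(). */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* count is now zero, lock held */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	atomic_int refs = 2;
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	printf("first put  -> zero? %d\n", dec_and_lock_demo(&refs, &lock)); /* 0 */
	printf("second put -> zero? %d\n", dec_and_lock_demo(&refs, &lock)); /* 1 */
	pthread_mutex_unlock(&lock);	/* caller releases the lock on the final put */
	return 0;
}

As with the kernel version, the caller that gets back true owns the lock and must drop it after tearing down the refcounted object.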
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6a8bc6e..d1c057e7 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*/
-int radix_tree_preload(unsigned int __nocast gfp_mask)
+int radix_tree_preload(gfp_t gfp_mask)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
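
The one-line change above is part of the tree-wide switch of allocation-flag parameters from the weakly typed "unsigned int __nocast" to the dedicated gfp_t type, letting sparse catch callers that mix gfp flags with arbitrary integers. A simplified sketch of the typing involved (assumed shape only; the real definitions live in include/linux/gfp.h and the sparse annotations in the compiler headers, and the GFP_KERNEL value is illustrative):

/* Under sparse (__CHECKER__), __bitwise makes gfp_t a distinct
 * restricted type; a plain compiler sees an ordinary unsigned int. */
#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define GFP_KERNEL	((__force gfp_t)0xd0u)	/* value illustrative */

/* sparse now warns if a caller passes a bare int where a gfp_t is
 * expected, e.g. radix_tree_preload(0x10), which the old __nocast
 * annotation could not reliably catch. */
int radix_tree_preload(gfp_t gfp_mask);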
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c2fc470..5bdeaae 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -142,8 +142,8 @@ swiotlb_init_with_default_size (size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
- (1 << IO_TLB_SHIFT));
+ io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
+ (1 << IO_TLB_SHIFT), 0x100000000);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
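
The swiotlb hunk swaps the plain bootmem allocator for a limit-aware variant so the bounce buffer lands below 0x100000000 (4 GiB), keeping every slab reachable through a 32-bit DMA mask. A minimal sketch of the sizing arithmetic, assuming IO_TLB_SHIFT == 11 (2 KiB slabs) and ignoring the segment-size alignment the real init code also applies:

#include <stdio.h>

#define IO_TLB_SHIFT	11	/* 2 KiB per slab, as in this era's swiotlb */

int main(void)
{
	unsigned long default_size  = 64UL << 20;		/* 64 MiB default buffer */
	unsigned long io_tlb_nslabs = default_size >> IO_TLB_SHIFT;
	unsigned long bytes = io_tlb_nslabs * (1UL << IO_TLB_SHIFT);

	/* The new _limit call asks bootmem for this block below 4 GiB
	 * so 32-bit DMA masks can reach every bounce slab. */
	printf("%lu slabs, %lu bytes, limit 0x%llx\n",
	       io_tlb_nslabs, bytes, 0x100000000ULL);
	return 0;
}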
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 2cc79112..8a8b3a1 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_bm *bm;
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index d27c0a0..ca32112 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -258,7 +258,7 @@ found_match:
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
- int gfp_mask)
+ gfp_t gfp_mask)
{
int i, err = -EINVAL;
struct ts_config *conf;
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 73266b9..7fd4545 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
}
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_kmp *kmp;
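
The same gfp_t conversion lands in all three textsearch backends (ts_bm, ts_fsm, ts_kmp), whose init functions receive the mask through textsearch_prepare(). A hedged sketch of a caller passing a typed GFP_KERNEL down to kmp_init(); find_needle is a hypothetical function and error handling is trimmed:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/textsearch.h>

/* Hypothetical caller: search for "needle" in a linear buffer with
 * the KMP backend; the gfp_t mask flows through textsearch_prepare()
 * into kmp_init(). */
static int find_needle(const void *buf, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, buf, len);
	textsearch_destroy(conf);

	return pos != UINT_MAX ? (int)pos : -1;
}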