author		Sam Ravnborg <sam@ravnborg.org>		2009-01-16 21:58:10 +1000
committer	Greg Ungerer <gerg@uclinux.org>		2009-01-16 21:58:10 +1000
commit		49148020bcb6910ce71417bd990a5ce7017f9bd3 (patch)
tree		e410cc433a69075a0254ee4000cb10d71df3a641 /arch/m68k/include/asm/bitops_no.h
parent		ae04d1401577bb63151480a053057de58b8e10bb (diff)
m68k,m68knommu: merge header files
Merge header files for m68k and m68knommu to the single location:

    arch/m68k/include/asm

The majority of this patch was the result of the script that is
included in the changelog below. The script was originally written by
Arnd Bergmann and extended by me to cover a few more files.

When the header files differed, the script uses the following naming:

    The original m68k file is named <file>_mm.h  [mm for memory manager]
    The m68knommu file is named     <file>_no.h  [no for no memory manager]

The merged files use the following include guard:

    #ifdef __uClinux__
    #include "<file>_no.h"
    #else
    #include "<file>_mm.h"
    #endif

This include guard works because the m68knommu toolchain sets the
__uClinux__ symbol - so this should work in userspace too.

Merging the header files for m68k and m68knommu exposes the
(unexpected?) ABI differences, which makes it easier to actually
identify these and thus to fix them.

The commit has been build tested with both a m68k and a m68knommu
toolchain - with success.

The commit has also been tested with "make headers_check", and this
patch fixes "make headers_check" for m68knommu.

The script used:

TARGET=arch/m68k/include/asm
SOURCE=arch/m68knommu/include/asm

INCLUDE="cachectl.h errno.h fcntl.h hwtest.h ioctls.h ipcbuf.h \
linkage.h math-emu.h md.h mman.h movs.h msgbuf.h openprom.h \
oplib.h poll.h posix_types.h resource.h rtc.h sembuf.h shmbuf.h \
shm.h shmparam.h socket.h sockios.h spinlock.h statfs.h stat.h \
termbits.h termios.h tlb.h types.h user.h"

EQUAL="auxvec.h cputime.h device.h emergency-restart.h futex.h \
ioctl.h irq_regs.h kdebug.h local.h mutex.h percpu.h \
sections.h topology.h"

NOMUUFILES="anchor.h bootstd.h coldfire.h commproc.h dbg.h \
elia.h flat.h m5206sim.h m520xsim.h m523xsim.h m5249sim.h \
m5272sim.h m527xsim.h m528xsim.h m5307sim.h m532xsim.h \
m5407sim.h m68360_enet.h m68360.h m68360_pram.h m68360_quicc.h \
m68360_regs.h MC68328.h MC68332.h MC68EZ328.h MC68VZ328.h \
mcfcache.h mcfdma.h mcfmbus.h mcfne.h mcfpci.h mcfpit.h \
mcfsim.h mcfsmc.h mcftimer.h mcfuart.h mcfwdebug.h \
nettel.h quicc_simple.h smp.h"

FILES="atomic.h bitops.h bootinfo.h bug.h bugs.h byteorder.h cache.h \
cacheflush.h checksum.h current.h delay.h div64.h \
dma-mapping.h dma.h elf.h entry.h fb.h fpu.h hardirq.h hw_irq.h io.h \
irq.h kmap_types.h machdep.h mc146818rtc.h mmu.h mmu_context.h \
module.h page.h page_offset.h param.h pci.h pgalloc.h \
pgtable.h processor.h ptrace.h scatterlist.h segment.h \
setup.h sigcontext.h siginfo.h signal.h string.h system.h swab.h \
thread_info.h timex.h tlbflush.h traps.h uaccess.h ucontext.h \
unaligned.h unistd.h"

mergefile() {
	BASE=${1%.h}
	git mv ${SOURCE}/$1 ${TARGET}/${BASE}_no.h
	git mv ${TARGET}/$1 ${TARGET}/${BASE}_mm.h

	cat << EOF > ${TARGET}/$1
#ifdef __uClinux__
#include "${BASE}_no.h"
#else
#include "${BASE}_mm.h"
#endif
EOF

	git add ${TARGET}/$1
}

set -e
mkdir -p ${TARGET}

git mv include/asm-m68k/* ${TARGET}
rmdir include/asm-m68k

git rm ${SOURCE}/Kbuild
for F in $INCLUDE $EQUAL; do
	git rm ${SOURCE}/$F
done

for F in $NOMUUFILES; do
	git mv ${SOURCE}/$F ${TARGET}/$F
done

for F in $FILES ; do
	mergefile $F
done

rmdir arch/m68knommu/include/asm
rmdir arch/m68knommu/include

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
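For the file shown on this page, mergefile() therefore split bitops.h
into bitops_no.h (added in the diff below) and bitops_mm.h, plus a
small dispatch wrapper. A sketch of that wrapper, reconstructed from
the heredoc in the script above (the comments are illustrative, not
part of the generated file):

    /* arch/m68k/include/asm/bitops.h -- generated by mergefile() */
    #ifdef __uClinux__
    #include "bitops_no.h"	/* no-MMU variant: the new file in the diff below */
    #else
    #include "bitops_mm.h"	/* MMU variant: the original m68k bitops.h */
    #endif

Because the m68knommu toolchain predefines __uClinux__, a plain
#include <asm/bitops.h> selects the right variant with no Kbuild logic.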
Diffstat (limited to 'arch/m68k/include/asm/bitops_no.h')
-rw-r--r--	arch/m68k/include/asm/bitops_no.h	337
1 file changed, 337 insertions(+), 0 deletions(-)
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
new file mode 100644
index 0000000..9d3cbe5
--- /dev/null
+++ b/arch/m68k/include/asm/bitops_no.h
@@ -0,0 +1,337 @@
+#ifndef _M68KNOMMU_BITOPS_H
+#define _M68KNOMMU_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#include <linux/compiler.h>
+#include <asm/byteorder.h> /* swab32 */
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
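+/*
+ * ColdFire cores with the ISA_A+ or ISA_C instruction set provide
+ * "bitrev" (reverse all 32 bits) and "ff1" (find the offset of the
+ * first set bit, scanning from the MSB). Reversing first turns ff1
+ * into a find-lowest-set-bit, which is exactly __ffs(); ffs() is the
+ * same but 1-based, with ffs(0) == 0.
+ */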
+#if defined (__mcfisaaplus__) || defined (__mcfisac__)
+static inline int ffs(unsigned int val)
+{
+	if (!val)
+		return 0;
+
+	asm volatile(
+		"bitrev %0\n\t"
+		"ff1 %0\n\t"
+		: "=d" (val)
+		: "0" (val)
+	);
+	val++;
+	return val;
+}
+
+static inline int __ffs(unsigned int val)
+{
+	asm volatile(
+		"bitrev %0\n\t"
+		"ff1 %0\n\t"
+		: "=d" (val)
+		: "0" (val)
+	);
+	return val;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffz.h>
+
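+/*
+ * With a memory operand the m68k bit instructions address a single
+ * byte and use only the low three bits of the bit number. The CPU is
+ * big-endian, so bit nr of an unsigned long lives in byte
+ * ((nr ^ 31) >> 3) of that long, at position (nr & 7) within the
+ * byte; all of the operations below rely on that addressing.
+ */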
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
+}
+
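+/*
+ * The non-atomic __set_bit() (and the other __ variants below) can
+ * simply alias the atomic versions: each operation is a single
+ * read-modify-write instruction, and these are uniprocessor parts.
+ */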
+#define __set_bit(nr, addr) set_bit(nr, addr)
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
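+/* A compiler barrier is enough here: m68knommu is uniprocessor-only. */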
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
+}
+
+#define __clear_bit(nr, addr) clear_bit(nr, addr)
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
+}
+
+#define __change_bit(nr, addr) change_bit(nr, addr)
+
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
+
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
+
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static __inline__ int __constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int __test_bit(int nr, const volatile unsigned long *addr)
+{
+	int *a = (int *)addr;
+	int mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	return ((mask & *a) != 0);
+}
+
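+/*
+ * Dispatch on __builtin_constant_p() so that a constant bit number
+ * folds to a compile-time mask test, while a variable bit number uses
+ * the shift-and-mask helper above.
+ */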
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)))
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
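+/*
+ * The ext2 bit operations use little-endian bit order regardless of
+ * host endianness: bit nr is in byte (nr >> 3), so no big-endian
+ * (nr ^ 31) adjustment is needed here.
+ */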
+static __inline__ int ext2_set_bit(int nr, volatile void *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+#define ext2_set_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_set_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+#define ext2_clear_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_clear_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
+{
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
+		: "=d" (retval)
+		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("btst %2,%1; sne %0"
+		: "=d" (retval)
+		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
+}
+
+#define ext2_find_first_zero_bit(addr, size) \
+	ext2_find_next_zero_bit((addr), (size), 0)
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		/* We hold the little endian value in tmp, but then the
+		 * shift is illegal. So we could keep a big endian value
+		 * in tmp, like this:
+		 *
+		 * tmp = __swab32(*(p++));
+		 * tmp |= ~0UL >> (32-offset);
+		 *
+		 * but this would decrease performance, so we change the
+		 * shift:
+		 */
+		tmp = *(p++);
+		tmp |= __swab32(~0UL >> (32-offset));
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	/* tmp is little endian, so we would have to swab the shift,
+	 * see above. But then we have to swab tmp below for ffz, so
+	 * we might as well do this here.
+	 */
+	return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+	return result + ffz(__swab32(tmp));
+}
+
+#define ext2_find_next_bit(addr, size, off) \
+	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
+#include <asm-generic/bitops/minix.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+
+#endif /* _M68KNOMMU_BITOPS_H */