author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:20:03 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 11:20:03 +0200
commit     96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree       d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86_64
parent     27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
download   op-kernel-dev-96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42.zip
           op-kernel-dev-96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42.tar.gz
i386/x86_64: move headers to include/asm-x86

Move the headers to include/asm-x86 and fixup the header install make rules

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
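For context: the diffstat that follows covers only the deleted include/asm-x86_64 side of the move. On the new include/asm-x86 side, the unification series generally keeps one header per name and selects the 32-bit or 64-bit variant at preprocessing time. The lines below are an illustrative sketch of that dispatch pattern only — they are not part of this commit's diff, and the file names are hypothetical examples.

/*
 * Hypothetical include/asm-x86/apic.h wrapper -- illustrative sketch,
 * not taken from this commit. It shows how a unified asm-x86 header
 * can pick the 32-bit or 64-bit implementation at build time.
 */
#ifdef CONFIG_X86_32
# include "apic_32.h"
#else
# include "apic_64.h"
#endif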
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/8253pit.h | 10
-rw-r--r--  include/asm-x86_64/Kbuild | 21
-rw-r--r--  include/asm-x86_64/a.out.h | 28
-rw-r--r--  include/asm-x86_64/acpi.h | 153
-rw-r--r--  include/asm-x86_64/agp.h | 34
-rw-r--r--  include/asm-x86_64/alternative-asm.i | 12
-rw-r--r--  include/asm-x86_64/alternative.h | 159
-rw-r--r--  include/asm-x86_64/apic.h | 107
-rw-r--r--  include/asm-x86_64/apicdef.h | 392
-rw-r--r--  include/asm-x86_64/atomic.h | 466
-rw-r--r--  include/asm-x86_64/auxvec.h | 6
-rw-r--r--  include/asm-x86_64/bitops.h | 427
-rw-r--r--  include/asm-x86_64/boot.h | 1
-rw-r--r--  include/asm-x86_64/bootparam.h | 1
-rw-r--r--  include/asm-x86_64/bootsetup.h | 40
-rw-r--r--  include/asm-x86_64/bug.h | 34
-rw-r--r--  include/asm-x86_64/bugs.h | 6
-rw-r--r--  include/asm-x86_64/byteorder.h | 33
-rw-r--r--  include/asm-x86_64/cache.h | 26
-rw-r--r--  include/asm-x86_64/cacheflush.h | 35
-rw-r--r--  include/asm-x86_64/calgary.h | 72
-rw-r--r--  include/asm-x86_64/calling.h | 162
-rw-r--r--  include/asm-x86_64/checksum.h | 195
-rw-r--r--  include/asm-x86_64/cmpxchg.h | 134
-rw-r--r--  include/asm-x86_64/compat.h | 212
-rw-r--r--  include/asm-x86_64/cpu.h | 1
-rw-r--r--  include/asm-x86_64/cpufeature.h | 30
-rw-r--r--  include/asm-x86_64/cputime.h | 6
-rw-r--r--  include/asm-x86_64/current.h | 27
-rw-r--r--  include/asm-x86_64/debugreg.h | 65
-rw-r--r--  include/asm-x86_64/delay.h | 30
-rw-r--r--  include/asm-x86_64/desc.h | 174
-rw-r--r--  include/asm-x86_64/desc_defs.h | 69
-rw-r--r--  include/asm-x86_64/device.h | 15
-rw-r--r--  include/asm-x86_64/div64.h | 1
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 203
-rw-r--r--  include/asm-x86_64/dma.h | 304
-rw-r--r--  include/asm-x86_64/dmi.h | 24
-rw-r--r--  include/asm-x86_64/dwarf2.h | 57
-rw-r--r--  include/asm-x86_64/e820.h | 61
-rw-r--r--  include/asm-x86_64/edac.h | 18
-rw-r--r--  include/asm-x86_64/elf.h | 180
-rw-r--r--  include/asm-x86_64/emergency-restart.h | 6
-rw-r--r--  include/asm-x86_64/errno.h | 6
-rw-r--r--  include/asm-x86_64/fb.h | 19
-rw-r--r--  include/asm-x86_64/fcntl.h | 1
-rw-r--r--  include/asm-x86_64/fixmap.h | 92
-rw-r--r--  include/asm-x86_64/floppy.h | 283
-rw-r--r--  include/asm-x86_64/fpu32.h | 10
-rw-r--r--  include/asm-x86_64/futex.h | 125
-rw-r--r--  include/asm-x86_64/genapic.h | 37
-rw-r--r--  include/asm-x86_64/hardirq.h | 23
-rw-r--r--  include/asm-x86_64/hpet.h | 18
-rw-r--r--  include/asm-x86_64/hw_irq.h | 175
-rw-r--r--  include/asm-x86_64/hypertransport.h | 1
-rw-r--r--  include/asm-x86_64/i387.h | 209
-rw-r--r--  include/asm-x86_64/i8253.h | 6
-rw-r--r--  include/asm-x86_64/ia32.h | 178
-rw-r--r--  include/asm-x86_64/ia32_unistd.h | 18
-rw-r--r--  include/asm-x86_64/ide.h | 1
-rw-r--r--  include/asm-x86_64/idle.h | 14
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h | 31
-rw-r--r--  include/asm-x86_64/io.h | 276
-rw-r--r--  include/asm-x86_64/io_apic.h | 136
-rw-r--r--  include/asm-x86_64/ioctl.h | 1
-rw-r--r--  include/asm-x86_64/ioctls.h | 86
-rw-r--r--  include/asm-x86_64/iommu.h | 29
-rw-r--r--  include/asm-x86_64/ipcbuf.h | 29
-rw-r--r--  include/asm-x86_64/ipi.h | 128
-rw-r--r--  include/asm-x86_64/irq.h | 51
-rw-r--r--  include/asm-x86_64/irq_regs.h | 1
-rw-r--r--  include/asm-x86_64/irqflags.h | 142
-rw-r--r--  include/asm-x86_64/ist.h | 1
-rw-r--r--  include/asm-x86_64/k8.h | 14
-rw-r--r--  include/asm-x86_64/kdebug.h | 36
-rw-r--r--  include/asm-x86_64/kexec.h | 94
-rw-r--r--  include/asm-x86_64/kmap_types.h | 19
-rw-r--r--  include/asm-x86_64/kprobes.h | 90
-rw-r--r--  include/asm-x86_64/ldt.h | 36
-rw-r--r--  include/asm-x86_64/linkage.h | 6
-rw-r--r--  include/asm-x86_64/local.h | 222
-rw-r--r--  include/asm-x86_64/mach_apic.h | 29
-rw-r--r--  include/asm-x86_64/mc146818rtc.h | 29
-rw-r--r--  include/asm-x86_64/mce.h | 115
-rw-r--r--  include/asm-x86_64/mman.h | 19
-rw-r--r--  include/asm-x86_64/mmsegment.h | 8
-rw-r--r--  include/asm-x86_64/mmu.h | 21
-rw-r--r--  include/asm-x86_64/mmu_context.h | 74
-rw-r--r--  include/asm-x86_64/mmzone.h | 56
-rw-r--r--  include/asm-x86_64/module.h | 10
-rw-r--r--  include/asm-x86_64/mpspec.h | 233
-rw-r--r--  include/asm-x86_64/msgbuf.h | 27
-rw-r--r--  include/asm-x86_64/msidef.h | 1
-rw-r--r--  include/asm-x86_64/msr-index.h | 1
-rw-r--r--  include/asm-x86_64/msr.h | 187
-rw-r--r--  include/asm-x86_64/mtrr.h | 152
-rw-r--r--  include/asm-x86_64/mutex.h | 105
-rw-r--r--  include/asm-x86_64/namei.h | 11
-rw-r--r--  include/asm-x86_64/nmi.h | 95
-rw-r--r--  include/asm-x86_64/numa.h | 38
-rw-r--r--  include/asm-x86_64/page.h | 143
-rw-r--r--  include/asm-x86_64/param.h | 22
-rw-r--r--  include/asm-x86_64/parport.h | 18
-rw-r--r--  include/asm-x86_64/pci-direct.h | 17
-rw-r--r--  include/asm-x86_64/pci.h | 126
-rw-r--r--  include/asm-x86_64/pda.h | 125
-rw-r--r--  include/asm-x86_64/percpu.h | 68
-rw-r--r--  include/asm-x86_64/pgalloc.h | 119
-rw-r--r--  include/asm-x86_64/pgtable.h | 432
-rw-r--r--  include/asm-x86_64/poll.h | 1
-rw-r--r--  include/asm-x86_64/posix_types.h | 119
-rw-r--r--  include/asm-x86_64/prctl.h | 10
-rw-r--r--  include/asm-x86_64/processor-flags.h | 1
-rw-r--r--  include/asm-x86_64/processor.h | 439
-rw-r--r--  include/asm-x86_64/proto.h | 104
-rw-r--r--  include/asm-x86_64/ptrace-abi.h | 51
-rw-r--r--  include/asm-x86_64/ptrace.h | 78
-rw-r--r--  include/asm-x86_64/required-features.h | 46
-rw-r--r--  include/asm-x86_64/resource.h | 6
-rw-r--r--  include/asm-x86_64/resume-trace.h | 13
-rw-r--r--  include/asm-x86_64/rio.h | 74
-rw-r--r--  include/asm-x86_64/rtc.h | 10
-rw-r--r--  include/asm-x86_64/rwlock.h | 26
-rw-r--r--  include/asm-x86_64/scatterlist.h | 24
-rw-r--r--  include/asm-x86_64/seccomp.h | 24
-rw-r--r--  include/asm-x86_64/sections.h | 7
-rw-r--r--  include/asm-x86_64/segment.h | 53
-rw-r--r--  include/asm-x86_64/semaphore.h | 181
-rw-r--r--  include/asm-x86_64/sembuf.h | 25
-rw-r--r--  include/asm-x86_64/serial.h | 29
-rw-r--r--  include/asm-x86_64/setup.h | 6
-rw-r--r--  include/asm-x86_64/shmbuf.h | 38
-rw-r--r--  include/asm-x86_64/shmparam.h | 6
-rw-r--r--  include/asm-x86_64/sigcontext.h | 55
-rw-r--r--  include/asm-x86_64/sigcontext32.h | 71
-rw-r--r--  include/asm-x86_64/siginfo.h | 8
-rw-r--r--  include/asm-x86_64/signal.h | 181
-rw-r--r--  include/asm-x86_64/smp.h | 117
-rw-r--r--  include/asm-x86_64/socket.h | 55
-rw-r--r--  include/asm-x86_64/sockios.h | 13
-rw-r--r--  include/asm-x86_64/sparsemem.h | 26
-rw-r--r--  include/asm-x86_64/spinlock.h | 167
-rw-r--r--  include/asm-x86_64/spinlock_types.h | 20
-rw-r--r--  include/asm-x86_64/stacktrace.h | 20
-rw-r--r--  include/asm-x86_64/stat.h | 44
-rw-r--r--  include/asm-x86_64/statfs.h | 58
-rw-r--r--  include/asm-x86_64/string.h | 60
-rw-r--r--  include/asm-x86_64/suspend.h | 55
-rw-r--r--  include/asm-x86_64/swiotlb.h | 56
-rw-r--r--  include/asm-x86_64/system.h | 180
-rw-r--r--  include/asm-x86_64/tce.h | 48
-rw-r--r--  include/asm-x86_64/termbits.h | 198
-rw-r--r--  include/asm-x86_64/termios.h | 90
-rw-r--r--  include/asm-x86_64/therm_throt.h | 1
-rw-r--r--  include/asm-x86_64/thread_info.h | 169
-rw-r--r--  include/asm-x86_64/timex.h | 31
-rw-r--r--  include/asm-x86_64/tlb.h | 13
-rw-r--r--  include/asm-x86_64/tlbflush.h | 109
-rw-r--r--  include/asm-x86_64/topology.h | 71
-rw-r--r--  include/asm-x86_64/tsc.h | 1
-rw-r--r--  include/asm-x86_64/types.h | 55
-rw-r--r--  include/asm-x86_64/uaccess.h | 384
-rw-r--r--  include/asm-x86_64/ucontext.h | 12
-rw-r--r--  include/asm-x86_64/unaligned.h | 37
-rw-r--r--  include/asm-x86_64/unistd.h | 687
-rw-r--r--  include/asm-x86_64/unwind.h | 12
-rw-r--r--  include/asm-x86_64/user.h | 114
-rw-r--r--  include/asm-x86_64/user32.h | 69
-rw-r--r--  include/asm-x86_64/vga.h | 20
-rw-r--r--  include/asm-x86_64/vgtod.h | 29
-rw-r--r--  include/asm-x86_64/vsyscall.h | 44
-rw-r--r--  include/asm-x86_64/vsyscall32.h | 20
-rw-r--r--  include/asm-x86_64/xor.h | 354
173 files changed, 0 insertions, 14087 deletions
diff --git a/include/asm-x86_64/8253pit.h b/include/asm-x86_64/8253pit.h
deleted file mode 100644
index 285f784..0000000
--- a/include/asm-x86_64/8253pit.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE 1193182UL
-
-#endif
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
deleted file mode 100644
index 75a2def..0000000
--- a/include/asm-x86_64/Kbuild
+++ /dev/null
@@ -1,21 +0,0 @@
-include include/asm-generic/Kbuild.asm
-
-ALTARCH := i386
-ARCHDEF := defined __x86_64__
-ALTARCHDEF := defined __i386__
-
-header-y += boot.h
-header-y += bootsetup.h
-header-y += debugreg.h
-header-y += ldt.h
-header-y += msr-index.h
-header-y += prctl.h
-header-y += ptrace-abi.h
-header-y += sigcontext32.h
-header-y += ucontext.h
-header-y += vsyscall32.h
-
-unifdef-y += mce.h
-unifdef-y += msr.h
-unifdef-y += mtrr.h
-unifdef-y += vsyscall.h
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
deleted file mode 100644
index e789300..0000000
--- a/include/asm-x86_64/a.out.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __X8664_A_OUT_H__
-#define __X8664_A_OUT_H__
-
-/* 32bit a.out */
-
-struct exec
-{
- unsigned int a_info; /* Use macros N_MAGIC, etc for access */
- unsigned a_text; /* length of text, in bytes */
- unsigned a_data; /* length of data, in bytes */
- unsigned a_bss; /* length of uninitialized data area for file, in bytes */
- unsigned a_syms; /* length of symbol table data in file, in bytes */
- unsigned a_entry; /* start address */
- unsigned a_trsize; /* length of relocation info for text, in bytes */
- unsigned a_drsize; /* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX TASK_SIZE64
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
deleted file mode 100644
index 9817335..0000000
--- a/include/asm-x86_64/acpi.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * asm-x86_64/acpi.h
- *
- * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#ifndef _ASM_ACPI_H
-#define _ASM_ACPI_H
-
-#ifdef __KERNEL__
-
-#include <acpi/pdc_intel.h>
-#include <asm/numa.h>
-
-#define COMPILER_DEPENDENT_INT64 long long
-#define COMPILER_DEPENDENT_UINT64 unsigned long long
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE - External ACPI interfaces
- * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* Asm macros */
-
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
-#define ACPI_FLUSH_CPU_CACHE() wbinvd()
-
-int __acpi_acquire_global_lock(unsigned int *lock);
-int __acpi_release_global_lock(unsigned int *lock);
-
-#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
-
-#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = __acpi_release_global_lock(&facs->global_lock))
-
-/*
- * Math helper asm macros
- */
-#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
- asm("divl %2;" \
- :"=a"(q32), "=d"(r32) \
- :"r"(d32), \
- "0"(n_lo), "1"(n_hi))
-
-
-#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
- asm("shrl $1,%2;" \
- "rcrl $1,%3;" \
- :"=r"(n_hi), "=r"(n_lo) \
- :"0"(n_hi), "1"(n_lo))
-
-#ifdef CONFIG_ACPI
-extern int acpi_lapic;
-extern int acpi_ioapic;
-extern int acpi_noirq;
-extern int acpi_strict;
-extern int acpi_disabled;
-extern int acpi_pci_disabled;
-extern int acpi_ht;
-static inline void disable_acpi(void)
-{
- acpi_disabled = 1;
- acpi_ht = 0;
- acpi_pci_disabled = 1;
- acpi_noirq = 1;
-}
-
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
-extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
-static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
-static inline void acpi_disable_pci(void)
-{
- acpi_pci_disabled = 1;
- acpi_noirq_set();
-}
-extern int acpi_irq_balance_set(char *str);
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
-
-extern unsigned long acpi_wakeup_address;
-
-/* early initialization routine */
-extern void acpi_reserve_bootmem(void);
-
-#else /* !CONFIG_ACPI */
-
-#define acpi_lapic 0
-#define acpi_ioapic 0
-static inline void acpi_noirq_set(void) { }
-static inline void acpi_disable_pci(void) { }
-
-#endif /* !CONFIG_ACPI */
-
-extern int acpi_numa;
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-extern int acpi_disabled;
-extern int acpi_pci_disabled;
-
-#define ARCH_HAS_POWER_INIT 1
-
-extern int acpi_skip_timer_override;
-extern int acpi_use_timer_override;
-
-#ifdef CONFIG_ACPI_NUMA
-extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes,
- int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
- int num_nodes)
-{
-}
-#endif
-
-#endif /*__KERNEL__*/
-
-#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
deleted file mode 100644
index de33866..0000000
--- a/include/asm-x86_64/agp.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/cacheflush.h>
-
-/*
- * Functions to keep the agpgart mappings coherent.
- * The GART gives the CPU a physical alias of memory. The alias is
- * mapped uncacheable. Make sure there are no conflicting mappings
- * with different cachability attributes for the same page.
- */
-
-/* Caller's responsibility to call global_flush_tlb() for
- * performance reasons */
-#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
-#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
-#define flush_agp_mappings() global_flush_tlb()
-
-/* Could use CLFLUSH here if the cpu supports it. But then it would
- need to be called for each cacheline of the whole page so it may not be
- worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order) \
- ((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order) \
- free_pages((unsigned long)(table), (order))
-
-#endif
diff --git a/include/asm-x86_64/alternative-asm.i b/include/asm-x86_64/alternative-asm.i
deleted file mode 100644
index 0b3f1a2..0000000
--- a/include/asm-x86_64/alternative-asm.i
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifdef CONFIG_SMP
- .macro LOCK_PREFIX
-1: lock
- .section .smp_locks,"a"
- .align 8
- .quad 1b
- .previous
- .endm
-#else
- .macro LOCK_PREFIX
- .endm
-#endif
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
deleted file mode 100644
index ab161e8..0000000
--- a/include/asm-x86_64/alternative.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef _X86_64_ALTERNATIVE_H
-#define _X86_64_ALTERNATIVE_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-
-/*
- * Alternative inline assembly for SMP.
- *
- * The LOCK_PREFIX macro defined here replaces the LOCK and
- * LOCK_PREFIX macros used everywhere in the source tree.
- *
- * SMP alternatives use the same data structures as the other
- * alternatives and the X86_FEATURE_UP flag to indicate the case of a
- * UP system running a SMP kernel. The existing apply_alternatives()
- * works fine for patching a SMP kernel for UP.
- *
- * The SMP alternative tables can be kept after boot and contain both
- * UP and SMP versions of the instructions to allow switching back to
- * SMP at runtime, when hotplugging in a new CPU, which is especially
- * useful in virtualized environments.
- *
- * The very common lock prefix is handled as special case in a
- * separate table which is a pure address list without replacement ptr
- * and size information. That keeps the table sizes small.
- */
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
- ".section .smp_locks,\"a\"\n" \
- " .align 8\n" \
- " .quad 661f\n" /* address */ \
- ".previous\n" \
- "661:\n\tlock; "
-
-#else /* ! CONFIG_SMP */
-#define LOCK_PREFIX ""
-#endif
-
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
-struct alt_instr {
- u8 *instr; /* original instruction */
- u8 *replacement;
- u8 cpuid; /* cpuid bit set for replacement */
- u8 instrlen; /* length of original instruction */
- u8 replacementlen; /* length of new instruction, <= instrlen */
- u8 pad[5];
-};
-
-extern void alternative_instructions(void);
-extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
-
-struct module;
-
-#ifdef CONFIG_SMP
-extern void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end);
-extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
-#else
-static inline void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end) {}
-static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
-#endif
-
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement make sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), ##input)
-
-/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, newinstr, feature, output, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte %c[feat]\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" : output : [feat] "i" (feature), ##input)
-
-/*
- * use this macro(s) if you need more than one output parameter
- * in alternative_io
- */
-#define ASM_OUTPUT2(a, b) a, b
-
-struct paravirt_patch;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
-#else
-static inline void
-apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
-{}
-#define __parainstructions NULL
-#define __parainstructions_end NULL
-#endif
-
-extern void text_poke(void *addr, unsigned char *opcode, int len);
-
-#endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
deleted file mode 100644
index 85125ef..0000000
--- a/include/asm-x86_64/apic.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef __ASM_APIC_H
-#define __ASM_APIC_H
-
-#include <linux/pm.h>
-#include <linux/delay.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <asm/system.h>
-
-#define Dprintk(x...)
-
-/*
- * Debugging macros
- */
-#define APIC_QUIET 0
-#define APIC_VERBOSE 1
-#define APIC_DEBUG 2
-
-extern int apic_verbosity;
-extern int apic_runs_main_timer;
-extern int ioapic_force;
-extern int apic_mapped;
-
-/*
- * Define the default level of output to be very little
- * This can be turned up by using apic=verbose for more
- * information and apic=debug for _lots_ of information.
- * apic_verbosity is defined in apic.c
- */
-#define apic_printk(v, s, a...) do { \
- if ((v) <= apic_verbosity) \
- printk(s, ##a); \
- } while (0)
-
-struct pt_regs;
-
-/*
- * Basic functions accessing APICs.
- */
-
-static __inline void apic_write(unsigned long reg, unsigned int v)
-{
- *((volatile unsigned int *)(APIC_BASE+reg)) = v;
-}
-
-static __inline unsigned int apic_read(unsigned long reg)
-{
- return *((volatile unsigned int *)(APIC_BASE+reg));
-}
-
-extern void apic_wait_icr_idle(void);
-extern unsigned int safe_apic_wait_icr_idle(void);
-
-static inline void ack_APIC_irq(void)
-{
- /*
- * ack_APIC_irq() actually gets compiled as a single instruction:
- * - a single rmw on Pentium/82489DX
- * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
- * ... yummie.
- */
-
- /* Docs say use 0 for future compatibility */
- apic_write(APIC_EOI, 0);
-}
-
-extern int get_maxlvt (void);
-extern void clear_local_APIC (void);
-extern void connect_bsp_APIC (void);
-extern void disconnect_bsp_APIC (int virt_wire_setup);
-extern void disable_local_APIC (void);
-extern int verify_local_APIC (void);
-extern void cache_APIC_registers (void);
-extern void sync_Arb_IDs (void);
-extern void init_bsp_APIC (void);
-extern void setup_local_APIC (void);
-extern void init_apic_mappings (void);
-extern void smp_local_timer_interrupt (void);
-extern void setup_boot_APIC_clock (void);
-extern void setup_secondary_APIC_clock (void);
-extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
-extern void setup_apic_routing(void);
-
-extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
- unsigned char msg_type, unsigned char mask);
-
-extern int apic_is_clustered_box(void);
-
-#define K8_APIC_EXT_LVT_BASE 0x500
-#define K8_APIC_EXT_INT_MSG_FIX 0x0
-#define K8_APIC_EXT_INT_MSG_SMI 0x2
-#define K8_APIC_EXT_INT_MSG_NMI 0x4
-#define K8_APIC_EXT_INT_MSG_EXT 0x7
-#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
-
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
-
-#define ARCH_APICTIMER_STOPS_ON_C3 1
-
-extern unsigned boot_cpu_id;
-extern int local_apic_timer_c2_ok;
-
-#endif /* __ASM_APIC_H */
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
deleted file mode 100644
index 1dd4006..0000000
--- a/include/asm-x86_64/apicdef.h
+++ /dev/null
@@ -1,392 +0,0 @@
-#ifndef __ASM_APICDEF_H
-#define __ASM_APICDEF_H
-
-/*
- * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
- *
- * Alan Cox <Alan.Cox@linux.org>, 1995.
- * Ingo Molnar <mingo@redhat.com>, 1999, 2000
- */
-
-#define APIC_DEFAULT_PHYS_BASE 0xfee00000
-
-#define APIC_ID 0x20
-#define APIC_ID_MASK (0xFFu<<24)
-#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_ID(x) (((x)<<24))
-#define APIC_LVR 0x30
-#define APIC_LVR_MASK 0xFF00FF
-#define GET_APIC_VERSION(x) ((x)&0xFFu)
-#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
-#define APIC_INTEGRATED(x) ((x)&0xF0u)
-#define APIC_TASKPRI 0x80
-#define APIC_TPRI_MASK 0xFFu
-#define APIC_ARBPRI 0x90
-#define APIC_ARBPRI_MASK 0xFFu
-#define APIC_PROCPRI 0xA0
-#define APIC_EOI 0xB0
-#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
-#define APIC_RRR 0xC0
-#define APIC_LDR 0xD0
-#define APIC_LDR_MASK (0xFFu<<24)
-#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
-#define APIC_ALL_CPUS 0xFFu
-#define APIC_DFR 0xE0
-#define APIC_DFR_CLUSTER 0x0FFFFFFFul
-#define APIC_DFR_FLAT 0xFFFFFFFFul
-#define APIC_SPIV 0xF0
-#define APIC_SPIV_FOCUS_DISABLED (1<<9)
-#define APIC_SPIV_APIC_ENABLED (1<<8)
-#define APIC_ISR 0x100
-#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
-#define APIC_TMR 0x180
-#define APIC_IRR 0x200
-#define APIC_ESR 0x280
-#define APIC_ESR_SEND_CS 0x00001
-#define APIC_ESR_RECV_CS 0x00002
-#define APIC_ESR_SEND_ACC 0x00004
-#define APIC_ESR_RECV_ACC 0x00008
-#define APIC_ESR_SENDILL 0x00020
-#define APIC_ESR_RECVILL 0x00040
-#define APIC_ESR_ILLREGA 0x00080
-#define APIC_ICR 0x300
-#define APIC_DEST_SELF 0x40000
-#define APIC_DEST_ALLINC 0x80000
-#define APIC_DEST_ALLBUT 0xC0000
-#define APIC_ICR_RR_MASK 0x30000
-#define APIC_ICR_RR_INVALID 0x00000
-#define APIC_ICR_RR_INPROG 0x10000
-#define APIC_ICR_RR_VALID 0x20000
-#define APIC_INT_LEVELTRIG 0x08000
-#define APIC_INT_ASSERT 0x04000
-#define APIC_ICR_BUSY 0x01000
-#define APIC_DEST_LOGICAL 0x00800
-#define APIC_DEST_PHYSICAL 0x00000
-#define APIC_DM_FIXED 0x00000
-#define APIC_DM_LOWEST 0x00100
-#define APIC_DM_SMI 0x00200
-#define APIC_DM_REMRD 0x00300
-#define APIC_DM_NMI 0x00400
-#define APIC_DM_INIT 0x00500
-#define APIC_DM_STARTUP 0x00600
-#define APIC_DM_EXTINT 0x00700
-#define APIC_VECTOR_MASK 0x000FF
-#define APIC_ICR2 0x310
-#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
-#define SET_APIC_DEST_FIELD(x) ((x)<<24)
-#define APIC_LVTT 0x320
-#define APIC_LVTTHMR 0x330
-#define APIC_LVTPC 0x340
-#define APIC_LVT0 0x350
-#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
-#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
-#define SET_APIC_TIMER_BASE(x) (((x)<<18))
-#define APIC_TIMER_BASE_CLKIN 0x0
-#define APIC_TIMER_BASE_TMBASE 0x1
-#define APIC_TIMER_BASE_DIV 0x2
-#define APIC_LVT_TIMER_PERIODIC (1<<17)
-#define APIC_LVT_MASKED (1<<16)
-#define APIC_LVT_LEVEL_TRIGGER (1<<15)
-#define APIC_LVT_REMOTE_IRR (1<<14)
-#define APIC_INPUT_POLARITY (1<<13)
-#define APIC_SEND_PENDING (1<<12)
-#define APIC_MODE_MASK 0x700
-#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
-#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
-#define APIC_MODE_FIXED 0x0
-#define APIC_MODE_NMI 0x4
-#define APIC_MODE_EXTINT 0x7
-#define APIC_LVT1 0x360
-#define APIC_LVTERR 0x370
-#define APIC_TMICT 0x380
-#define APIC_TMCCT 0x390
-#define APIC_TDCR 0x3E0
-#define APIC_TDR_DIV_TMBASE (1<<2)
-#define APIC_TDR_DIV_1 0xB
-#define APIC_TDR_DIV_2 0x0
-#define APIC_TDR_DIV_4 0x1
-#define APIC_TDR_DIV_8 0x2
-#define APIC_TDR_DIV_16 0x3
-#define APIC_TDR_DIV_32 0x8
-#define APIC_TDR_DIV_64 0x9
-#define APIC_TDR_DIV_128 0xA
-
-#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-
-#define MAX_IO_APICS 128
-#define MAX_LOCAL_APIC 256
-
-/*
- * All x86-64 systems are xAPIC compatible.
- * In the following, "apicid" is a physical APIC ID.
- */
-#define XAPIC_DEST_CPUS_SHIFT 4
-#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
-#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
-#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
-
-/*
- * the local APIC register structure, memory mapped. Not terribly well
- * tested, but we might eventually use this one in the future - the
- * problem why we cannot use it right now is the P5 APIC, it has an
- * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
- */
-#define u32 unsigned int
-
-struct local_apic {
-
-/*000*/ struct { u32 __reserved[4]; } __reserved_01;
-
-/*010*/ struct { u32 __reserved[4]; } __reserved_02;
-
-/*020*/ struct { /* APIC ID Register */
- u32 __reserved_1 : 24,
- phys_apic_id : 4,
- __reserved_2 : 4;
- u32 __reserved[3];
- } id;
-
-/*030*/ const
- struct { /* APIC Version Register */
- u32 version : 8,
- __reserved_1 : 8,
- max_lvt : 8,
- __reserved_2 : 8;
- u32 __reserved[3];
- } version;
-
-/*040*/ struct { u32 __reserved[4]; } __reserved_03;
-
-/*050*/ struct { u32 __reserved[4]; } __reserved_04;
-
-/*060*/ struct { u32 __reserved[4]; } __reserved_05;
-
-/*070*/ struct { u32 __reserved[4]; } __reserved_06;
-
-/*080*/ struct { /* Task Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } tpr;
-
-/*090*/ const
- struct { /* Arbitration Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } apr;
-
-/*0A0*/ const
- struct { /* Processor Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } ppr;
-
-/*0B0*/ struct { /* End Of Interrupt Register */
- u32 eoi;
- u32 __reserved[3];
- } eoi;
-
-/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
-
-/*0D0*/ struct { /* Logical Destination Register */
- u32 __reserved_1 : 24,
- logical_dest : 8;
- u32 __reserved_2[3];
- } ldr;
-
-/*0E0*/ struct { /* Destination Format Register */
- u32 __reserved_1 : 28,
- model : 4;
- u32 __reserved_2[3];
- } dfr;
-
-/*0F0*/ struct { /* Spurious Interrupt Vector Register */
- u32 spurious_vector : 8,
- apic_enabled : 1,
- focus_cpu : 1,
- __reserved_2 : 22;
- u32 __reserved_3[3];
- } svr;
-
-/*100*/ struct { /* In Service Register */
-/*170*/ u32 bitfield;
- u32 __reserved[3];
- } isr [8];
-
-/*180*/ struct { /* Trigger Mode Register */
-/*1F0*/ u32 bitfield;
- u32 __reserved[3];
- } tmr [8];
-
-/*200*/ struct { /* Interrupt Request Register */
-/*270*/ u32 bitfield;
- u32 __reserved[3];
- } irr [8];
-
-/*280*/ union { /* Error Status Register */
- struct {
- u32 send_cs_error : 1,
- receive_cs_error : 1,
- send_accept_error : 1,
- receive_accept_error : 1,
- __reserved_1 : 1,
- send_illegal_vector : 1,
- receive_illegal_vector : 1,
- illegal_register_address : 1,
- __reserved_2 : 24;
- u32 __reserved_3[3];
- } error_bits;
- struct {
- u32 errors;
- u32 __reserved_3[3];
- } all_errors;
- } esr;
-
-/*290*/ struct { u32 __reserved[4]; } __reserved_08;
-
-/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
-
-/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
-
-/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
-
-/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
-
-/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
-
-/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
-
-/*300*/ struct { /* Interrupt Command Register 1 */
- u32 vector : 8,
- delivery_mode : 3,
- destination_mode : 1,
- delivery_status : 1,
- __reserved_1 : 1,
- level : 1,
- trigger : 1,
- __reserved_2 : 2,
- shorthand : 2,
- __reserved_3 : 12;
- u32 __reserved_4[3];
- } icr1;
-
-/*310*/ struct { /* Interrupt Command Register 2 */
- union {
- u32 __reserved_1 : 24,
- phys_dest : 4,
- __reserved_2 : 4;
- u32 __reserved_3 : 24,
- logical_dest : 8;
- } dest;
- u32 __reserved_4[3];
- } icr2;
-
-/*320*/ struct { /* LVT - Timer */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- timer_mode : 1,
- __reserved_3 : 14;
- u32 __reserved_4[3];
- } lvt_timer;
-
-/*330*/ struct { /* LVT - Thermal Sensor */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_thermal;
-
-/*340*/ struct { /* LVT - Performance Counter */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_pc;
-
-/*350*/ struct { /* LVT - LINT0 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint0;
-
-/*360*/ struct { /* LVT - LINT1 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint1;
-
-/*370*/ struct { /* LVT - Error */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_error;
-
-/*380*/ struct { /* Timer Initial Count Register */
- u32 initial_count;
- u32 __reserved_2[3];
- } timer_icr;
-
-/*390*/ const
- struct { /* Timer Current Count Register */
- u32 curr_count;
- u32 __reserved_2[3];
- } timer_ccr;
-
-/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
-
-/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
-
-/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
-
-/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
-
-/*3E0*/ struct { /* Timer Divide Configuration Register */
- u32 divisor : 4,
- __reserved_1 : 28;
- u32 __reserved_2[3];
- } timer_dcr;
-
-/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
-
-} __attribute__ ((packed));
-
-#undef u32
-
-#define BAD_APICID 0xFFu
-
-#endif
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
deleted file mode 100644
index f2e6463..0000000
--- a/include/asm-x86_64/atomic.h
+++ /dev/null
@@ -1,466 +0,0 @@
-#ifndef __ARCH_X86_64_ATOMIC__
-#define __ARCH_X86_64_ATOMIC__
-
-#include <asm/alternative.h>
-#include <asm/cmpxchg.h>
-
-/* atomic_t should be 32 bit signed type */
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- int __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
- return i + __i;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- return atomic_add_return(-i,v);
-}
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
-
-/* An 64bit atomic type */
-
-typedef struct { volatile long counter; } atomic64_t;
-
-#define ATOMIC64_INIT(i) { (i) }
-
-/**
- * atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
- */
-#define atomic64_read(v) ((v)->counter)
-
-/**
- * atomic64_set - set atomic64 variable
- * @v: pointer to type atomic64_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic64_set(v,i) (((v)->counter) = (i))
-
-/**
- * atomic64_add - add integer to atomic64 variable
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "addq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "subq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "subq %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-/**
- * atomic64_inc - increment atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic64_inc(atomic64_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "incq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic64_dec - decrement atomic64 variable
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic64_dec(atomic64_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "decq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "decq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "incq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "addq %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-/**
- * atomic64_add_return - add and return
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
-{
- long __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddq %0, %1;"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
- return i + __i;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
-{
- return atomic64_add_return(-i,v);
-}
-
-#define atomic64_inc_return(v) (atomic64_add_return(1,v))
-#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
-
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#include <asm-generic/atomic.h>
-#endif
diff --git a/include/asm-x86_64/auxvec.h b/include/asm-x86_64/auxvec.h
deleted file mode 100644
index 1d5ab0d..0000000
--- a/include/asm-x86_64/auxvec.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_X86_64_AUXVEC_H
-#define __ASM_X86_64_AUXVEC_H
-
-#define AT_SYSINFO_EHDR 33
-
-#endif
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
deleted file mode 100644
index d4dbbe5..0000000
--- a/include/asm-x86_64/bitops.h
+++ /dev/null
@@ -1,427 +0,0 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <asm/alternative.h>
-
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
-/* Technically wrong, but this avoids compilation errors on some gcc
- versions. */
-#define ADDR "=m" (*(volatile long *) addr)
-#else
-#define ADDR "+m" (*(volatile long *) addr)
-#endif
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :ADDR
- :"dIr" (nr) : "memory");
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
- __asm__ volatile(
- "btsl %1,%0"
- :ADDR
- :"dIr" (nr) : "memory");
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :ADDR
- :"dIr" (nr));
-}
-
-static __inline__ void __clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__(
- "btrl %1,%0"
- :ADDR
- :"dIr" (nr));
-}
-
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__(
- "btcl %1,%0"
- :ADDR
- :"dIr" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %1,%0"
- :ADDR
- :"dIr" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__(
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr));
- return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__(
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr));
- return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),ADDR
- :"dIr" (nr) : "memory");
- return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void * addr);
-#endif
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (*(volatile long *)addr),"dIr" (nr));
- return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-#undef ADDR
-
-extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
-extern long find_first_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_bit(const unsigned long * addr, long size, long offset);
-
-/* return index of first bet set in val or max when no bit is set */
-static inline unsigned long __scanbit(unsigned long val, unsigned long max)
-{
- asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
- return val;
-}
-
-#define find_first_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(*(unsigned long *)addr,(size))) : \
- find_first_bit(addr,size)))
-
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
- find_next_bit(addr,size,off)))
-
-#define find_first_zero_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(~*(unsigned long *)addr,(size))) : \
- find_first_zero_bit(addr,size)))
-
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
- find_next_zero_bit(addr,size,off)))
-
-/*
- * Find string of zero bits in a bitmap. -1 when not found.
- */
-extern unsigned long
-find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, bitmap);
- i++;
- }
-}
-
-static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __clear_bit(i, bitmap);
- i++;
- }
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __inline__ unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-/*
- * __fls: find last bit set.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static __inline__ unsigned long __fls(unsigned long word)
-{
- __asm__("bsrq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
- return r+1;
-}
-
-/**
- * fls64 - find last bit set in 64 bit word
- * @x: the word to search
- *
- * This is defined the same way as fls.
- */
-static __inline__ int fls64(__u64 x)
-{
- if (x == 0)
- return 0;
- return __fls(x) + 1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static __inline__ int fls(int x)
-{
- int r;
-
- __asm__("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
- return r+1;
-}
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock,nr,addr) \
- test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr,addr) \
- test_and_clear_bit((nr),(unsigned long*)addr)
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_BITOPS_H */
diff --git a/include/asm-x86_64/boot.h b/include/asm-x86_64/boot.h
deleted file mode 100644
index 3c46cea..0000000
--- a/include/asm-x86_64/boot.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/boot.h>
diff --git a/include/asm-x86_64/bootparam.h b/include/asm-x86_64/bootparam.h
deleted file mode 100644
index aa82e52..0000000
--- a/include/asm-x86_64/bootparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/bootparam.h>
diff --git a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h
deleted file mode 100644
index 7b1c3ad..0000000
--- a/include/asm-x86_64/bootsetup.h
+++ /dev/null
@@ -1,40 +0,0 @@
-
-#ifndef _X86_64_BOOTSETUP_H
-#define _X86_64_BOOTSETUP_H 1
-
-#define BOOT_PARAM_SIZE 4096
-extern char x86_boot_params[BOOT_PARAM_SIZE];
-
-/*
- * This is set up by the setup-routine at boot-time
- */
-#define PARAM ((unsigned char *)x86_boot_params)
-#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
-#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
-#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
-#define INITRD_START (*(unsigned int *) (PARAM+0x218))
-#define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
-#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
-#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
-#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-#define COMMAND_LINE boot_command_line
-
-#define RAMDISK_IMAGE_START_MASK 0x07FF
-#define RAMDISK_PROMPT_FLAG 0x8000
-#define RAMDISK_LOAD_FLAG 0x4000
-
-#endif
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
deleted file mode 100644
index 6826064..0000000
--- a/include/asm-x86_64/bug.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_X8664_BUG_H
-#define __ASM_X8664_BUG_H 1
-
-#ifdef CONFIG_BUG
-#define HAVE_ARCH_BUG
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define BUG() \
- do { \
- asm volatile("1:\tud2\n" \
- ".pushsection __bug_table,\"a\"\n" \
- "2:\t.quad 1b, %c0\n" \
- "\t.word %c1, 0\n" \
- "\t.org 2b+%c2\n" \
- ".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (sizeof(struct bug_entry))); \
- for(;;) ; \
- } while(0)
-#else
-#define BUG() \
- do { \
- asm volatile("ud2"); \
- for(;;) ; \
- } while(0)
-#endif
-
-void out_of_line_bug(void);
-#else
-static inline void out_of_line_bug(void) { }
-#endif
-
-#include <asm-generic/bug.h>
-#endif
diff --git a/include/asm-x86_64/bugs.h b/include/asm-x86_64/bugs.h
deleted file mode 100644
index b33dc04..0000000
--- a/include/asm-x86_64/bugs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_X86_64_BUGS_H
-#define _ASM_X86_64_BUGS_H
-
-void check_bugs(void);
-
-#endif /* _ASM_X86_64_BUGS_H */
diff --git a/include/asm-x86_64/byteorder.h b/include/asm-x86_64/byteorder.h
deleted file mode 100644
index 5e86c86..0000000
--- a/include/asm-x86_64/byteorder.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _X86_64_BYTEORDER_H
-#define _X86_64_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
- __asm__("bswapq %0" : "=r" (x) : "0" (x));
- return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
- __asm__("bswapl %0" : "=r" (x) : "0" (x));
- return x;
-}
-
-/* Do not define swab16. Gcc is smart enough to recognize the C version and
- convert it into a rotation or exchange. */
-
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab64(x) ___arch__swab64(x)
-
-#endif /* __GNUC__ */
-
-#define __BYTEORDER_HAS_U64__
-
-#include <linux/byteorder/little_endian.h>
-
-#endif /* _X86_64_BYTEORDER_H */
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
deleted file mode 100644
index 052df75..0000000
--- a/include/asm-x86_64/cache.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * include/asm-x86_64/cache.h
- */
-#ifndef __ARCH_X8664_CACHE_H
-#define __ARCH_X8664_CACHE_H
-
-
-/* L1 cache line size */
-#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#ifdef CONFIG_X86_VSMP
-
-/* vSMP Internode cacheline shift */
-#define INTERNODE_CACHE_SHIFT (12)
-#ifdef CONFIG_SMP
-#define __cacheline_aligned_in_smp \
- __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
- __attribute__((__section__(".data.page_aligned")))
-#endif
-
-#endif
-
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
-#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
deleted file mode 100644
index ab1cb5c..0000000
--- a/include/asm-x86_64/cacheflush.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _X8664_CACHEFLUSH_H
-#define _X8664_CACHEFLUSH_H
-
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
-/* Caches aren't brain-dead on the intel. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-void global_flush_tlb(void);
-int change_page_attr(struct page *page, int numpages, pgprot_t prot);
-int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
-
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
-#endif
-
-#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
deleted file mode 100644
index 67f6040..0000000
--- a/include/asm-x86_64/calgary.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Derived from include/asm-powerpc/iommu.h
- *
- * Copyright IBM Corporation, 2006-2007
- *
- * Author: Jon Mason <jdmason@us.ibm.com>
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_X86_64_CALGARY_H
-#define _ASM_X86_64_CALGARY_H
-
-#include <linux/spinlock.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/timer.h>
-#include <asm/types.h>
-
-struct iommu_table {
- struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
- unsigned long it_base; /* mapped address of tce table */
- unsigned long it_hint; /* Hint for next alloc */
- unsigned long *it_map; /* A simple allocation bitmap for now */
- void __iomem *bbar; /* Bridge BAR */
- u64 tar_val; /* Table Address Register */
- struct timer_list watchdog_timer;
- spinlock_t it_lock; /* Protects it_map */
- unsigned int it_size; /* Size of iommu table in entries */
- unsigned char it_busno; /* Bus number this table belongs to */
-};
-
-struct cal_chipset_ops {
- void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
- void (*tce_cache_blast)(struct iommu_table *tbl);
- void (*dump_error_regs)(struct iommu_table *tbl);
-};
-
-#define TCE_TABLE_SIZE_UNSPECIFIED ~0
-#define TCE_TABLE_SIZE_64K 0
-#define TCE_TABLE_SIZE_128K 1
-#define TCE_TABLE_SIZE_256K 2
-#define TCE_TABLE_SIZE_512K 3
-#define TCE_TABLE_SIZE_1M 4
-#define TCE_TABLE_SIZE_2M 5
-#define TCE_TABLE_SIZE_4M 6
-#define TCE_TABLE_SIZE_8M 7
-
-extern int use_calgary;
-
-#ifdef CONFIG_CALGARY_IOMMU
-extern int calgary_iommu_init(void);
-extern void detect_calgary(void);
-#else
-static inline int calgary_iommu_init(void) { return 1; }
-static inline void detect_calgary(void) { return; }
-#endif
-
-#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
deleted file mode 100644
index 6f4f63a..0000000
--- a/include/asm-x86_64/calling.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Some macros to handle stack frames in assembly.
- */
-
-
-#define R15 0
-#define R14 8
-#define R13 16
-#define R12 24
-#define RBP 32
-#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save up to here */
-#define R11 48
-#define R10 56
-#define R9 64
-#define R8 72
-#define RAX 80
-#define RCX 88
-#define RDX 96
-#define RSI 104
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-/* end of arguments */
-/* cpu exception frame or undefined in case of fast syscall. */
-#define RIP 128
-#define CS 136
-#define EFLAGS 144
-#define RSP 152
-#define SS 160
-#define ARGOFFSET R11
-#define SWFRAME ORIG_RAX
-
- .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
- subq $9*8+\addskip,%rsp
- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
- movq %rdi,8*8(%rsp)
- CFI_REL_OFFSET rdi,8*8
- movq %rsi,7*8(%rsp)
- CFI_REL_OFFSET rsi,7*8
- movq %rdx,6*8(%rsp)
- CFI_REL_OFFSET rdx,6*8
- .if \norcx
- .else
- movq %rcx,5*8(%rsp)
- CFI_REL_OFFSET rcx,5*8
- .endif
- movq %rax,4*8(%rsp)
- CFI_REL_OFFSET rax,4*8
- .if \nor891011
- .else
- movq %r8,3*8(%rsp)
- CFI_REL_OFFSET r8,3*8
- movq %r9,2*8(%rsp)
- CFI_REL_OFFSET r9,2*8
- movq %r10,1*8(%rsp)
- CFI_REL_OFFSET r10,1*8
- movq %r11,(%rsp)
- CFI_REL_OFFSET r11,0*8
- .endif
- .endm
-
-#define ARG_SKIP 9*8
- .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
- .if \skipr11
- .else
- movq (%rsp),%r11
- CFI_RESTORE r11
- .endif
- .if \skipr8910
- .else
- movq 1*8(%rsp),%r10
- CFI_RESTORE r10
- movq 2*8(%rsp),%r9
- CFI_RESTORE r9
- movq 3*8(%rsp),%r8
- CFI_RESTORE r8
- .endif
- .if \skiprax
- .else
- movq 4*8(%rsp),%rax
- CFI_RESTORE rax
- .endif
- .if \skiprcx
- .else
- movq 5*8(%rsp),%rcx
- CFI_RESTORE rcx
- .endif
- .if \skiprdx
- .else
- movq 6*8(%rsp),%rdx
- CFI_RESTORE rdx
- .endif
- movq 7*8(%rsp),%rsi
- CFI_RESTORE rsi
- movq 8*8(%rsp),%rdi
- CFI_RESTORE rdi
- .if ARG_SKIP+\addskip > 0
- addq $ARG_SKIP+\addskip,%rsp
- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
- .endif
- .endm
-
- .macro LOAD_ARGS offset
- movq \offset(%rsp),%r11
- movq \offset+8(%rsp),%r10
- movq \offset+16(%rsp),%r9
- movq \offset+24(%rsp),%r8
- movq \offset+40(%rsp),%rcx
- movq \offset+48(%rsp),%rdx
- movq \offset+56(%rsp),%rsi
- movq \offset+64(%rsp),%rdi
- movq \offset+72(%rsp),%rax
- .endm
-
-#define REST_SKIP 6*8
- .macro SAVE_REST
- subq $REST_SKIP,%rsp
- CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq %rbx,5*8(%rsp)
- CFI_REL_OFFSET rbx,5*8
- movq %rbp,4*8(%rsp)
- CFI_REL_OFFSET rbp,4*8
- movq %r12,3*8(%rsp)
- CFI_REL_OFFSET r12,3*8
- movq %r13,2*8(%rsp)
- CFI_REL_OFFSET r13,2*8
- movq %r14,1*8(%rsp)
- CFI_REL_OFFSET r14,1*8
- movq %r15,(%rsp)
- CFI_REL_OFFSET r15,0*8
- .endm
-
- .macro RESTORE_REST
- movq (%rsp),%r15
- CFI_RESTORE r15
- movq 1*8(%rsp),%r14
- CFI_RESTORE r14
- movq 2*8(%rsp),%r13
- CFI_RESTORE r13
- movq 3*8(%rsp),%r12
- CFI_RESTORE r12
- movq 4*8(%rsp),%rbp
- CFI_RESTORE rbp
- movq 5*8(%rsp),%rbx
- CFI_RESTORE rbx
- addq $REST_SKIP,%rsp
- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
- .endm
-
- .macro SAVE_ALL
- SAVE_ARGS
- SAVE_REST
- .endm
-
- .macro RESTORE_ALL addskip=0
- RESTORE_REST
- RESTORE_ARGS 0,\addskip
- .endm
-
- .macro icebp
- .byte 0xf1
- .endm
diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h
deleted file mode 100644
index 419fe88..0000000
--- a/include/asm-x86_64/checksum.h
+++ /dev/null
@@ -1,195 +0,0 @@
-#ifndef _X86_64_CHECKSUM_H
-#define _X86_64_CHECKSUM_H
-
-/*
- * Checksums for x86-64
- * Copyright 2002 by Andi Kleen, SuSE Labs
- * with some code from asm-i386/checksum.h
- */
-
-#include <linux/compiler.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-
-/**
- * csum_fold - Fold and invert a 32bit checksum.
- * @sum: 32bit unfolded sum
- *
- * Fold a 32bit running checksum to 16bit and invert it. This is usually
- * the last step before putting a checksum into a packet.
- * Make sure not to mix with 64bit checksums.
- */
-static inline __sum16 csum_fold(__wsum sum)
-{
- __asm__(
- " addl %1,%0\n"
- " adcl $0xffff,%0"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000)
- );
- return (__force __sum16)(~(__force u32)sum >> 16);
-}
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- *
- * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
- * Arnt Gulbrandsen.
- */
-
-/**
- * ip_fast_csum - Compute the IPv4 header checksum efficiently.
- * @iph: ipv4 header
- * @ihl: length of header / 4
- */
-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
- unsigned int sum;
-
- asm( " movl (%1), %0\n"
- " subl $4, %2\n"
- " jbe 2f\n"
- " addl 4(%1), %0\n"
- " adcl 8(%1), %0\n"
- " adcl 12(%1), %0\n"
- "1: adcl 16(%1), %0\n"
- " lea 4(%1), %1\n"
- " decl %2\n"
- " jne 1b\n"
- " adcl $0, %0\n"
- " movl %0, %2\n"
- " shrl $16, %0\n"
- " addw %w2, %w0\n"
- " adcl $0, %0\n"
- " notl %0\n"
- "2:"
- /* Since the input registers which are loaded with iph and ihl
- are modified, we must also specify them as outputs, or gcc
- will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl)
- : "1" (iph), "2" (ihl)
- : "memory");
- return (__force __sum16)sum;
-}
-
-/**
- * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
- * @saddr: source address
- * @daddr: destination address
- * @len: length of packet
- * @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
- * Returns the pseudo header checksum of the input data. The result is
- * 32bit unfolded.
- */
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- asm(" addl %1, %0\n"
- " adcl %2, %0\n"
- " adcl %3, %0\n"
- " adcl $0, %0\n"
- : "=r" (sum)
- : "g" (daddr), "g" (saddr),
- "g" ((len + proto)<<8), "0" (sum));
- return sum;
-}
-
-
-/**
- * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
- * @saddr: source address
- * @daddr: destination address
- * @len: length of packet
- * @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
- * Returns the 16bit pseudo header checksum of the input data, already
- * complemented and ready to be filled in.
- */
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len, unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/**
- * csum_partial - Compute an internet checksum.
- * @buff: buffer to be checksummed
- * @len: length of buffer.
- * @sum: initial sum to be added in (32bit unfolded)
- *
- * Returns the 32bit unfolded internet checksum of the buffer.
- * Before filling it in it needs to be csum_fold()'ed.
- * buff should be aligned to a 64bit boundary if possible.
- */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-
-#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
-#define HAVE_CSUM_COPY_USER 1
-
-
-/* Do not call this directly. Use the wrappers below */
-extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
- int len,
- __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
-
-
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
-
-/* Old names. To be removed. */
-#define csum_and_copy_to_user csum_partial_copy_to_user
-#define csum_and_copy_from_user csum_partial_copy_from_user
-
-/**
- * ip_compute_csum - Compute a 16bit IP checksum.
- * @buff: buffer address.
- * @len: length of buffer.
- *
- * Returns the 16bit folded/inverted checksum of the passed buffer.
- * Ready to fill in.
- */
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-/**
- * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
- * @saddr: source address
- * @daddr: destination address
- * @len: length of packet
- * @proto: protocol of packet
- * @sum: initial sum (32bit unfolded) to be added in
- *
- * Computes an IPv6 pseudo header checksum. This sum is added into the
- * checksum of UDP/TCP packets and contains some network layer information.
- * Returns the unfolded 32bit checksum.
- */
-
-struct in6_addr;
-
-#define _HAVE_ARCH_IPV6_CSUM 1
-extern __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
- __u32 len, unsigned short proto, __wsum sum);
-
-static inline unsigned add32_with_carry(unsigned a, unsigned b)
-{
- asm("addl %2,%0\n\t"
- "adcl $0,%0"
- : "=r" (a)
- : "0" (a), "r" (b));
- return a;
-}
-
-#endif
-
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
deleted file mode 100644
index 5e18206..0000000
--- a/include/asm-x86_64/cmpxchg.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __ASM_CMPXCHG_H
-#define __ASM_CMPXCHG_H
-
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
- *ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has a side effect, so the volatile attribute is necessary,
- * but generally the primitive is invalid, *ptr is an output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-#endif
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
deleted file mode 100644
index 53cb96b..0000000
--- a/include/asm-x86_64/compat.h
+++ /dev/null
@@ -1,212 +0,0 @@
-#ifndef _ASM_X86_64_COMPAT_H
-#define _ASM_X86_64_COMPAT_H
-
-/*
- * Architecture specific compatibility types
- */
-#include <linux/types.h>
-#include <linux/sched.h>
-
-#define COMPAT_USER_HZ 100
-
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_time_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
-typedef u16 __compat_uid_t;
-typedef u16 __compat_gid_t;
-typedef u32 __compat_uid32_t;
-typedef u32 __compat_gid32_t;
-typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
-typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
-typedef u16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
-typedef u32 compat_caddr_t;
-typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_timer_t;
-typedef s32 compat_key_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
-typedef s64 __attribute__((aligned(4))) compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
-typedef u64 __attribute__((aligned(4))) compat_u64;
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-struct compat_stat {
- compat_dev_t st_dev;
- u16 __pad1;
- compat_ino_t st_ino;
- compat_mode_t st_mode;
- compat_nlink_t st_nlink;
- __compat_uid_t st_uid;
- __compat_gid_t st_gid;
- compat_dev_t st_rdev;
- u16 __pad2;
- u32 st_size;
- u32 st_blksize;
- u32 st_blocks;
- u32 st_atime;
- u32 st_atime_nsec;
- u32 st_mtime;
- u32 st_mtime_nsec;
- u32 st_ctime;
- u32 st_ctime_nsec;
- u32 __unused4;
- u32 __unused5;
-};
-
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-/*
- * IA32 uses 4 byte alignment for 64 bit quantities,
- * so we need to pack this structure.
- */
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-} __attribute__((packed));
-
-struct compat_statfs {
- int f_type;
- int f_bsize;
- int f_blocks;
- int f_bfree;
- int f_bavail;
- int f_files;
- int f_ffree;
- compat_fsid_t f_fsid;
- int f_namelen; /* SunOS ignores this field. */
- int f_frsize;
- int f_spare[5];
-};
-
-#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-typedef u32 compat_old_sigset_t; /* at least 32 bits */
-
-#define _COMPAT_NSIG 64
-#define _COMPAT_NSIG_BPW 32
-
-typedef u32 compat_sigset_word;
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
-
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid32_t uid;
- __compat_gid32_t gid;
- __compat_uid32_t cuid;
- __compat_gid32_t cgid;
- unsigned short mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- compat_ulong_t unused1;
- compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- compat_time_t sem_otime;
- compat_ulong_t __unused1;
- compat_time_t sem_ctime;
- compat_ulong_t __unused2;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused3;
- compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- compat_time_t msg_stime;
- compat_ulong_t __unused1;
- compat_time_t msg_rtime;
- compat_ulong_t __unused2;
- compat_time_t msg_ctime;
- compat_ulong_t __unused3;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- compat_size_t shm_segsz;
- compat_time_t shm_atime;
- compat_ulong_t __unused1;
- compat_time_t shm_dtime;
- compat_ulong_t __unused2;
- compat_time_t shm_ctime;
- compat_ulong_t __unused3;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-typedef u32 compat_uptr_t;
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
-static __inline__ void __user *compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->rsp - len;
-}
-
-static inline int is_compat_task(void)
-{
- return current_thread_info()->status & TS_COMPAT;
-}
-
-#endif /* _ASM_X86_64_COMPAT_H */
diff --git a/include/asm-x86_64/cpu.h b/include/asm-x86_64/cpu.h
deleted file mode 100644
index 8eea076..0000000
--- a/include/asm-x86_64/cpu.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/cpu.h>
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
deleted file mode 100644
index 8baefc3..0000000
--- a/include/asm-x86_64/cpufeature.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * cpufeature.h
- *
- * Defines x86 CPU feature bits
- */
-
-#ifndef __ASM_X8664_CPUFEATURE_H
-#define __ASM_X8664_CPUFEATURE_H
-
-#include <asm-i386/cpufeature.h>
-
-#undef cpu_has_vme
-#define cpu_has_vme 0
-
-#undef cpu_has_pae
-#define cpu_has_pae ___BUG___
-
-#undef cpu_has_mp
-#define cpu_has_mp 1 /* XXX */
-
-#undef cpu_has_k6_mtrr
-#define cpu_has_k6_mtrr 0
-
-#undef cpu_has_cyrix_arr
-#define cpu_has_cyrix_arr 0
-
-#undef cpu_has_centaur_mcr
-#define cpu_has_centaur_mcr 0
-
-#endif /* __ASM_X8664_CPUFEATURE_H */
diff --git a/include/asm-x86_64/cputime.h b/include/asm-x86_64/cputime.h
deleted file mode 100644
index a07012d..0000000
--- a/include/asm-x86_64/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __X86_64_CPUTIME_H
-#define __X86_64_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __X86_64_CPUTIME_H */
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h
deleted file mode 100644
index bc8adec..0000000
--- a/include/asm-x86_64/current.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
-
-#include <asm/pda.h>
-
-static inline struct task_struct *get_current(void)
-{
- struct task_struct *t = read_pda(pcurrent);
- return t;
-}
-
-#define current get_current()
-
-#else
-
-#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
-#endif
-
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif
-
-#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86_64/debugreg.h b/include/asm-x86_64/debugreg.h
deleted file mode 100644
index bd1aab1d..0000000
--- a/include/asm-x86_64/debugreg.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _X86_64_DEBUGREG_H
-#define _X86_64_DEBUGREG_H
-
-
-/* Indicate the register numbers for a number of the specific
- debug registers. Registers 0-3 contain the addresses we wish to trap on */
-#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
-#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
-
-#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
-#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
-
-/* Define a few things for the status register. We can use this to determine
- which debugging register was responsible for the trap. The other bits
- are either reserved or not of interest to us. */
-
-#define DR_TRAP0 (0x1) /* db0 */
-#define DR_TRAP1 (0x2) /* db1 */
-#define DR_TRAP2 (0x4) /* db2 */
-#define DR_TRAP3 (0x8) /* db3 */
-
-#define DR_STEP (0x4000) /* single-step */
-#define DR_SWITCH (0x8000) /* task switch */
-
-/* Now define a bunch of things for manipulating the control register.
- The top two bytes of the control register consist of 4 fields of 4
- bits - each field corresponds to one of the four debug registers,
- and indicates what types of access we trap on, and how large the data
- field is that we are looking at */
-
-#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
-#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
-
-#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
-#define DR_RW_WRITE (0x1)
-#define DR_RW_READ (0x3)
-
-#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
-#define DR_LEN_2 (0x4)
-#define DR_LEN_4 (0xC)
-#define DR_LEN_8 (0x8)
-
-/* The low byte of the control register determines which registers are
- enabled. There are 4 fields of two bits. One bit is "local", meaning
- that the processor will reset the bit after a task switch, and the other
- is "global", meaning that we have to explicitly reset the bit. With Linux,
- you can use either one, since we explicitly zero the register when we enter
- kernel mode. */
-
-#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
-#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
-
-#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
-#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
-
-/* The second byte of the control register has a few special things.
- We can slow the instruction pipeline for instructions coming via the
- gdt or the ldt if we want to. I am not sure why this is an advantage */
-
-#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
-#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
-#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
-
-#endif
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
deleted file mode 100644
index c2669f1f..0000000
--- a/include/asm-x86_64/delay.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _X8664_DELAY_H
-#define _X8664_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/x86_64/lib/delay.c
- */
-
-/* Undefined functions to get compile-time errors */
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long usecs);
-extern void __delay(unsigned long loops);
-
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
- __udelay(n))
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-
-#endif /* defined(_X8664_DELAY_H) */
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
deleted file mode 100644
index ac991b5..0000000
--- a/include/asm-x86_64/desc.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* Written 2000 by Andi Kleen */
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#include <linux/threads.h>
-#include <asm/ldt.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <asm/desc_defs.h>
-
-#include <asm/segment.h>
-#include <asm/mmu.h>
-
-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-
-#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
-#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
-
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern struct gate_struct idt_table[];
-extern struct desc_ptr cpu_gdt_descr[];
-
-/* the cpu gdt accessor */
-#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
-
-static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
-{
- struct gate_struct s;
- s.offset_low = PTR_LOW(func);
- s.segment = __KERNEL_CS;
- s.ist = ist;
- s.p = 1;
- s.dpl = dpl;
- s.zero0 = 0;
- s.zero1 = 0;
- s.type = type;
- s.offset_middle = PTR_MIDDLE(func);
- s.offset_high = PTR_HIGH(func);
- /* does not need to be atomic because it is only done once at setup time */
- memcpy(adr, &s, 16);
-}
-
-static inline void set_intr_gate(int nr, void *func)
-{
- BUG_ON((unsigned)nr > 0xFF);
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
-}
-
-static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
-{
- BUG_ON((unsigned)nr > 0xFF);
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
-}
-
-static inline void set_system_gate(int nr, void *func)
-{
- BUG_ON((unsigned)nr > 0xFF);
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
-}
-
-static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
-{
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
-}
-
-static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
- unsigned size)
-{
- struct ldttss_desc d;
- memset(&d,0,sizeof(d));
- d.limit0 = size & 0xFFFF;
- d.base0 = PTR_LOW(tss);
- d.base1 = PTR_MIDDLE(tss) & 0xFF;
- d.type = type;
- d.p = 1;
- d.limit1 = (size >> 16) & 0xF;
- d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
- d.base3 = PTR_HIGH(tss);
- memcpy(ptr, &d, 16);
-}
-
-static inline void set_tss_desc(unsigned cpu, void *addr)
-{
- /*
- * sizeof(unsigned long) coming from an extra "long" at the end
- * of the iobitmap. See tss_struct definition in processor.h
- *
- * -1? seg base+limit should be pointing to the address of the
- * last valid byte
- */
- set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
- (unsigned long)addr, DESC_TSS,
- IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
-}
-
-static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
-{
- set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
- DESC_LDT, size * 8 - 1);
-}
-
-#define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-/* Don't allow setting of the lm bit. It is useless anyway because
- 64bit system calls require __USER_CS. */
-#define LDT_entry_b(info) \
- (((info)->base_addr & 0xff000000) | \
- (((info)->base_addr & 0x00ff0000) >> 16) | \
- ((info)->limit & 0xf0000) | \
- (((info)->read_exec_only ^ 1) << 9) | \
- ((info)->contents << 10) | \
- (((info)->seg_not_present ^ 1) << 15) | \
- ((info)->seg_32bit << 22) | \
- ((info)->limit_in_pages << 23) | \
- ((info)->useable << 20) | \
- /* ((info)->lm << 21) | */ \
- 0x7000)
-
-#define LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0 && \
- (info)->lm == 0)
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
- unsigned int i;
- u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
-
- for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
- gdt[i] = t->tls_array[i];
-}
-
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
-{
- int count = pc->size;
-
- if (likely(!count)) {
- clear_LDT();
- return;
- }
-
- set_ldt_desc(cpu, pc->ldt, count);
- load_LDT_desc();
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- int cpu = get_cpu();
- load_LDT_nolock(pc, cpu);
- put_cpu();
-}
-
-extern struct desc_ptr idt_descr;
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
deleted file mode 100644
index 0890040..0000000
--- a/include/asm-x86_64/desc_defs.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Written 2000 by Andi Kleen */
-#ifndef __ARCH_DESC_DEFS_H
-#define __ARCH_DESC_DEFS_H
-
-/*
- * Segment descriptor structure definitions, usable from both x86_64 and i386
- * archs.
- */
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-
-// 8 byte segment descriptor
-struct desc_struct {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
- unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-} __attribute__((packed));
-
-struct n_desc_struct {
- unsigned int a,b;
-};
-
-enum {
- GATE_INTERRUPT = 0xE,
- GATE_TRAP = 0xF,
- GATE_CALL = 0xC,
-};
-
-// 16byte gate
-struct gate_struct {
- u16 offset_low;
- u16 segment;
- unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
- u16 offset_middle;
- u32 offset_high;
- u32 zero1;
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-
-enum {
- DESC_TSS = 0x9,
- DESC_LDT = 0x2,
-};
-
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct ldttss_desc {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
- u32 base3;
- u32 zero1;
-} __attribute__((packed));
-
-struct desc_ptr {
- unsigned short size;
- unsigned long address;
-} __attribute__((packed)) ;
-
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
diff --git a/include/asm-x86_64/device.h b/include/asm-x86_64/device.h
deleted file mode 100644
index 3afa03f..0000000
--- a/include/asm-x86_64/device.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_X86_64_DEVICE_H
-#define _ASM_X86_64_DEVICE_H
-
-struct dev_archdata {
-#ifdef CONFIG_ACPI
- void *acpi_handle;
-#endif
-};
-
-#endif /* _ASM_X86_64_DEVICE_H */
diff --git a/include/asm-x86_64/div64.h b/include/asm-x86_64/div64.h
deleted file mode 100644
index 6cd978c..0000000
--- a/include/asm-x86_64/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
deleted file mode 100644
index 6897e2a..0000000
--- a/include/asm-x86_64/dma-mapping.h
+++ /dev/null
@@ -1,203 +0,0 @@
-#ifndef _X8664_DMA_MAPPING_H
-#define _X8664_DMA_MAPPING_H 1
-
-/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
- */
-
-
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-#include <asm/swiotlb.h>
-
-struct dma_mapping_ops {
- int (*mapping_error)(dma_addr_t dma_addr);
- void* (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
- size_t size, int direction);
- /* like map_single, but doesn't check the device mask */
- dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
- size_t size, int direction);
- void (*unmap_single)(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
- void (*sync_single_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_range_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_single_range_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_sg_for_cpu)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- void (*sync_sg_for_device)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
- void (*unmap_sg)(struct device *hwdev,
- struct scatterlist *sg, int nents,
- int direction);
- int (*dma_supported)(struct device *hwdev, u64 mask);
- int is_phys;
-};
-
-extern dma_addr_t bad_dma_address;
-extern const struct dma_mapping_ops* dma_ops;
-extern int iommu_merge;
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dma_addr);
-
- return (dma_addr == bad_dma_address);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(hwdev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_ops->unmap_single(dev, addr, size, direction);
-}
-
-#define dma_map_page(dev,page,offset,size,dir) \
- dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-
-#define dma_unmap_page dma_unmap_single
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_cpu)
- dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
- direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(hwdev, dma_handle, size,
- direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_cpu) {
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
- }
-
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(hwdev, dma_handle,
- offset, size, direction);
-
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
- flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device) {
- dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
- }
-
- flush_write_buffers();
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- int direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
-}
-
-extern int dma_supported(struct device *hwdev, u64 mask);
-
-/* same for gart, swiotlb, and nommu */
-static inline int dma_get_cache_alignment(void)
-{
- return boot_cpu_data.x86_clflush_size;
-}
-
-#define dma_is_consistent(d, h) 1
-
-extern int dma_set_mask(struct device *dev, u64 mask);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- flush_write_buffers();
-}
-
-extern struct device fallback_dev;
-extern int panic_on_overflow;
-
-#endif /* _X8664_DMA_MAPPING_H */
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
deleted file mode 100644
index a37c16f..0000000
--- a/include/asm-x86_64/dma.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
-
-#include <linux/spinlock.h> /* And spinlocks */
-#include <asm/io.h> /* need byte IO */
-#include <linux/delay.h>
-
-
-#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
-#define dma_outb outb_p
-#else
-#define dma_outb outb
-#endif
-
-#define dma_inb inb
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-
-/* 16MB ISA DMA zone */
-#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
-
-/* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
-
-/* Compat define for old dma zone */
-#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG 0x08 /* command register (w) */
-#define DMA1_STAT_REG 0x08 /* status register (r) */
-#define DMA1_REQ_REG 0x09 /* request register (w) */
-#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
-#define DMA1_MODE_REG 0x0B /* mode register (w) */
-#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
-#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
-#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
-
-#define DMA2_CMD_REG 0xD0 /* command register (w) */
-#define DMA2_STAT_REG 0xD0 /* status register (r) */
-#define DMA2_REQ_REG 0xD2 /* request register (w) */
-#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
-#define DMA2_MODE_REG 0xD6 /* mode register (w) */
-#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
-#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
-#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
-
-#define DMA_ADDR_0 0x00 /* DMA address registers */
-#define DMA_ADDR_1 0x02
-#define DMA_ADDR_2 0x04
-#define DMA_ADDR_3 0x06
-#define DMA_ADDR_4 0xC0
-#define DMA_ADDR_5 0xC4
-#define DMA_ADDR_6 0xC8
-#define DMA_ADDR_7 0xCC
-
-#define DMA_CNT_0 0x01 /* DMA count registers */
-#define DMA_CNT_1 0x03
-#define DMA_CNT_2 0x05
-#define DMA_CNT_3 0x07
-#define DMA_CNT_4 0xC2
-#define DMA_CNT_5 0xC6
-#define DMA_CNT_6 0xCA
-#define DMA_CNT_7 0xCE
-
-#define DMA_PAGE_0 0x87 /* DMA page registers */
-#define DMA_PAGE_1 0x83
-#define DMA_PAGE_2 0x81
-#define DMA_PAGE_3 0x82
-#define DMA_PAGE_5 0x8B
-#define DMA_PAGE_6 0x89
-#define DMA_PAGE_7 0x8A
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set only the page register bits of the transfer address.
- * This is used for successive transfers when we know the contents of
- * the lower 16 bits of the DMA current address register, but a 64k boundary
- * may have been crossed.
- */
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
-{
- switch(dmanr) {
- case 0:
- dma_outb(pagenr, DMA_PAGE_0);
- break;
- case 1:
- dma_outb(pagenr, DMA_PAGE_1);
- break;
- case 2:
- dma_outb(pagenr, DMA_PAGE_2);
- break;
- case 3:
- dma_outb(pagenr, DMA_PAGE_3);
- break;
- case 5:
- dma_outb(pagenr & 0xfe, DMA_PAGE_5);
- break;
- case 6:
- dma_outb(pagenr & 0xfe, DMA_PAGE_6);
- break;
- case 7:
- dma_outb(pagenr & 0xfe, DMA_PAGE_7);
- break;
- }
-}
-
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- set_dma_page(dmanr, a>>16);
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-#endif /* _ASM_DMA_H */
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
deleted file mode 100644
index d02e32e..0000000
--- a/include/asm-x86_64/dmi.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_DMI_H
-#define _ASM_DMI_H 1
-
-#include <asm/io.h>
-
-#define DMI_MAX_DATA 2048
-
-extern int dmi_alloc_index;
-extern char dmi_alloc_data[DMI_MAX_DATA];
-
-/* This is so early that there is no good way to allocate dynamic memory.
- Allocate data in a BSS array. */
-static inline void *dmi_alloc(unsigned len)
-{
- int idx = dmi_alloc_index;
- if ((dmi_alloc_index += len) > DMI_MAX_DATA)
- return NULL;
- return dmi_alloc_data + idx;
-}
-
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
-
-#endif
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
deleted file mode 100644
index eedc085..0000000
--- a/include/asm-x86_64/dwarf2.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _DWARF2_H
-#define _DWARF2_H 1
-
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- Macros for dwarf2 CFI unwind table entries.
- See "as.info" for details on these pseudo ops. Unfortunately
- they are only supported in very new binutils, so define them
-   away for older versions.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC .cfi_startproc
-#define CFI_ENDPROC .cfi_endproc
-#define CFI_DEF_CFA .cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
-#define CFI_OFFSET .cfi_offset
-#define CFI_REL_OFFSET .cfi_rel_offset
-#define CFI_REGISTER .cfi_register
-#define CFI_RESTORE .cfi_restore
-#define CFI_REMEMBER_STATE .cfi_remember_state
-#define CFI_RESTORE_STATE .cfi_restore_state
-#define CFI_UNDEFINED .cfi_undefined
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME .cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#else
-
-/* use assembler line comment character # to ignore the arguments. */
-#define CFI_STARTPROC #
-#define CFI_ENDPROC #
-#define CFI_DEF_CFA #
-#define CFI_DEF_CFA_REGISTER #
-#define CFI_DEF_CFA_OFFSET #
-#define CFI_ADJUST_CFA_OFFSET #
-#define CFI_OFFSET #
-#define CFI_REL_OFFSET #
-#define CFI_REGISTER #
-#define CFI_RESTORE #
-#define CFI_REMEMBER_STATE #
-#define CFI_RESTORE_STATE #
-#define CFI_UNDEFINED #
-#define CFI_SIGNAL_FRAME #
-
-#endif
-
-#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
deleted file mode 100644
index 3486e70..0000000
--- a/include/asm-x86_64/e820.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * structures and definitions for the int 15, ax=e820 memory map
- * scheme.
- *
- * In a nutshell, setup.S populates a scratch table in the
- * empty_zero_block that contains a list of usable address/size
- * pairs. In setup.c, this information is transferred into the
- * e820map, and in init.c/numa.c, that new information is used to
- * mark pages reserved or not.
- */
-#ifndef __E820_HEADER
-#define __E820_HEADER
-
-#define E820MAP 0x2d0 /* our map */
-#define E820MAX 128 /* number of entries in E820MAP */
-#define E820NR 0x1e8 /* # entries in E820MAP */
-
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-
-#ifndef __ASSEMBLY__
-struct e820entry {
- u64 addr; /* start of memory segment */
- u64 size; /* size of memory segment */
- u32 type; /* type of memory segment */
-} __attribute__((packed));
-
-struct e820map {
- u32 nr_map;
- struct e820entry map[E820MAX];
-};
-
-extern unsigned long find_e820_area(unsigned long start, unsigned long end,
- unsigned size);
-extern void add_memory_region(unsigned long start, unsigned long size,
- int type);
-extern void setup_memory_region(void);
-extern void contig_e820_setup(void);
-extern unsigned long e820_end_of_ram(void);
-extern void e820_reserve_resources(void);
-extern void e820_mark_nosave_regions(void);
-extern void e820_print_map(char *who);
-extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
-extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
-
-extern void e820_setup_gap(void);
-extern void e820_register_active_regions(int nid,
- unsigned long start_pfn, unsigned long end_pfn);
-
-extern void finish_e820_parsing(void);
-
-extern struct e820map e820;
-
-extern unsigned ebda_addr, ebda_size;
-extern unsigned long nodemap_addr, nodemap_size;
-#endif/*!__ASSEMBLY__*/
-
-#endif/*__E820_HEADER*/
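
The e820_any_mapped()/e820_all_mapped() declarations above are range queries against the firmware memory map. A minimal sketch of one such query (the helper is illustrative):

#include <asm/e820.h>

static int range_is_usable_ram(unsigned long start, unsigned long end)
{
	/* e820_all_mapped() is true only if every byte of [start, end)
	 * is covered by E820_RAM entries in the firmware map. */
	return e820_all_mapped(start, end, E820_RAM);
}
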
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
deleted file mode 100644
index cad1cd4..0000000
--- a/include/asm-x86_64/edac.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef ASM_EDAC_H
-#define ASM_EDAC_H
-
-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static __inline__ void atomic_scrub(void *va, u32 size)
-{
- unsigned int *virt_addr = va;
- u32 i;
-
- for (i = 0; i < size / 4; i++, virt_addr++)
- /* Very carefully read and write to memory atomically
- * so we are interrupt, DMA and SMP safe.
- */
- __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
-}
-
-#endif
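
atomic_scrub() above relies on a locked add of zero to force a read-correct-write cycle on each word. A minimal sketch of scrubbing a whole lowmem page, roughly as an EDAC driver would (the helper is illustrative):

#include <linux/mm.h>
#include <asm/edac.h>

/* Scrub one lowmem page: the locked "addl $0" makes the memory controller
 * read, correct and write back every word, refreshing its ECC. */
static void scrub_one_page(struct page *page)
{
	atomic_scrub(page_address(page), PAGE_SIZE);
}
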
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
deleted file mode 100644
index b4fbe47..0000000
--- a/include/asm-x86_64/elf.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#ifndef __ASM_X86_64_ELF_H
-#define __ASM_X86_64_ELF_H
-
-/*
- * ELF register definitions..
- */
-
-#include <asm/ptrace.h>
-#include <asm/user.h>
-
-/* x86-64 relocation types */
-#define R_X86_64_NONE 0 /* No reloc */
-#define R_X86_64_64 1 /* Direct 64 bit */
-#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
-#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
-#define R_X86_64_PLT32 4 /* 32 bit PLT address */
-#define R_X86_64_COPY 5 /* Copy symbol at runtime */
-#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
-#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
-#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
- offset to GOT */
-#define R_X86_64_32 10 /* Direct 32 bit zero extended */
-#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
-#define R_X86_64_16 12 /* Direct 16 bit zero extended */
-#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
-#define R_X86_64_8 14 /* Direct 8 bit sign extended */
-#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_i387_struct elf_fpregset_t;
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS64
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_X86_64
-
-#ifdef __KERNEL__
-#include <asm/processor.h>
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
- ((x)->e_machine == EM_X86_64)
-
-
-/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
- contains a pointer to a function which might be registered using `atexit'.
-   This provides a means for the dynamic linker to call DT_FINI functions for
- shared libraries that have been loaded before the code runs.
-
-   A value of 0 tells us we have no such handler.
-
- We might as well make sure everything else is cleared too (except for %esp),
- just to make things more deterministic.
- */
-#define ELF_PLAT_INIT(_r, load_addr) do { \
- struct task_struct *cur = current; \
- (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
- (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
- (_r)->rax = 0; \
- (_r)->r8 = 0; \
- (_r)->r9 = 0; \
- (_r)->r10 = 0; \
- (_r)->r11 = 0; \
- (_r)->r12 = 0; \
- (_r)->r13 = 0; \
- (_r)->r14 = 0; \
- (_r)->r15 = 0; \
- cur->thread.fs = 0; cur->thread.gs = 0; \
- cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
- cur->thread.ds = 0; cur->thread.es = 0; \
- clear_thread_flag(TIF_IA32); \
-} while (0)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE 4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-
-/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
- now struct_user_regs, they are different). Assumes current is the process
- getting dumped. */
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
- unsigned v; \
- (pr_reg)[0] = (regs)->r15; \
- (pr_reg)[1] = (regs)->r14; \
- (pr_reg)[2] = (regs)->r13; \
- (pr_reg)[3] = (regs)->r12; \
- (pr_reg)[4] = (regs)->rbp; \
- (pr_reg)[5] = (regs)->rbx; \
- (pr_reg)[6] = (regs)->r11; \
- (pr_reg)[7] = (regs)->r10; \
- (pr_reg)[8] = (regs)->r9; \
- (pr_reg)[9] = (regs)->r8; \
- (pr_reg)[10] = (regs)->rax; \
- (pr_reg)[11] = (regs)->rcx; \
- (pr_reg)[12] = (regs)->rdx; \
- (pr_reg)[13] = (regs)->rsi; \
- (pr_reg)[14] = (regs)->rdi; \
- (pr_reg)[15] = (regs)->orig_rax; \
- (pr_reg)[16] = (regs)->rip; \
- (pr_reg)[17] = (regs)->cs; \
- (pr_reg)[18] = (regs)->eflags; \
- (pr_reg)[19] = (regs)->rsp; \
- (pr_reg)[20] = (regs)->ss; \
- (pr_reg)[21] = current->thread.fs; \
- (pr_reg)[22] = current->thread.gs; \
- asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
- asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
- asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
- asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
-} while(0);
-
-/* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
- but it's not easy, and we've already done it here. */
-
-#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
-
-/* This yields a string that ld.so will use to load implementation
- specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo.
-
- For the moment, we have only optimizations for the Intel generations,
- but that could change... */
-
-/* I'm not sure if we can use '-' here */
-#define ELF_PLATFORM ("x86_64")
-
-extern void set_personality_64bit(void);
-#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
-
-struct task_struct;
-
-extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
-/* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
-
-
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
-struct linux_binprm;
-extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
-
-extern int vdso_enabled;
-
-#define ARCH_DLINFO \
-do if (vdso_enabled) { \
- NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
-} while (0)
-
-#endif
-
-#endif
diff --git a/include/asm-x86_64/emergency-restart.h b/include/asm-x86_64/emergency-restart.h
deleted file mode 100644
index 680c395..0000000
--- a/include/asm-x86_64/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-extern void machine_emergency_restart(void);
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-x86_64/errno.h b/include/asm-x86_64/errno.h
deleted file mode 100644
index 3111821..0000000
--- a/include/asm-x86_64/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _X8664_ERRNO_H
-#define _X8664_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h
deleted file mode 100644
index 60548e6..0000000
--- a/include/asm-x86_64/fb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- if (boot_cpu_data.x86 > 3)
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-}
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/fcntl.h b/include/asm-x86_64/fcntl.h
deleted file mode 100644
index 46ab12d..0000000
--- a/include/asm-x86_64/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
deleted file mode 100644
index cdfbe4a..0000000
--- a/include/asm-x86_64/fixmap.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/kernel.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <asm/vsyscall.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process.
- *
- * These 'compile-time allocated' memory buffers are
- * fixed-size 4k pages (or larger if used with an increment
- * higher than 1). Use set_fixmap(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-enum fixed_addresses {
- VSYSCALL_LAST_PAGE,
- VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
- VSYSCALL_HPET,
- FIX_DBGP_BASE,
- FIX_EARLYCON_MEM_BASE,
- FIX_HPET_BASE,
-	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
- FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
-#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-#endif
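
set_fixmap()/fix_to_virt() above pair a compile-time index with a physical address chosen at boot. A minimal sketch, assuming early setup code mapping the first I/O APIC (the helper is illustrative):

#include <asm/fixmap.h>

/* Map the first I/O APIC uncached at its physical address and return the
 * fixed virtual address the rest of the kernel can use for it. */
static void __iomem *map_first_ioapic(unsigned long ioapic_phys)
{
	set_fixmap_nocache(FIX_IO_APIC_BASE_0, ioapic_phys);
	return (void __iomem *)fix_to_virt(FIX_IO_APIC_BASE_0);
}
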
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
deleted file mode 100644
index 6ea13c3..0000000
--- a/include/asm-x86_64/floppy.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Architecture specific parts of the Floppy driver
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995
- */
-#ifndef __ASM_X86_64_FLOPPY_H
-#define __ASM_X86_64_FLOPPY_H
-
-#include <linux/vmalloc.h>
-
-
-/*
- * The DMA channel used by the floppy controller cannot access data at
- * addresses >= 16MB
- *
- * Went back to the 1MB limit, as some people had problems with the floppy
- * driver otherwise. It doesn't matter much for performance anyway, as most
- * floppy accesses go through the track buffer.
- */
-#define _CROSS_64KB(a,s,vdma) \
-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-
-#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
-
-
-#define SW fd_routine[use_virtual_dma&1]
-#define CSW fd_routine[can_use_virtual_dma & 1]
-
-
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
-
-#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
-#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
-#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
-#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
-#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
-#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
-#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
-
-#define FLOPPY_CAN_FALLBACK_ON_NODMA
-
-static int virtual_dma_count;
-static int virtual_dma_residue;
-static char *virtual_dma_addr;
-static int virtual_dma_mode;
-static int doing_pdma;
-
-static irqreturn_t floppy_hardint(int irq, void *dev_id)
-{
- register unsigned char st;
-
-#undef TRACE_FLPY_INT
-
-#ifdef TRACE_FLPY_INT
- static int calls=0;
- static int bytes=0;
- static int dma_wait=0;
-#endif
- if (!doing_pdma)
- return floppy_interrupt(irq, dev_id);
-
-#ifdef TRACE_FLPY_INT
- if(!calls)
- bytes = virtual_dma_count;
-#endif
-
- {
- register int lcount;
- register char *lptr;
-
- st = 1;
- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
- lcount; lcount--, lptr++) {
- st=inb(virtual_dma_port+4) & 0xa0 ;
- if(st != 0xa0)
- break;
- if(virtual_dma_mode)
- outb_p(*lptr, virtual_dma_port+5);
- else
- *lptr = inb_p(virtual_dma_port+5);
- }
- virtual_dma_count = lcount;
- virtual_dma_addr = lptr;
- st = inb(virtual_dma_port+4);
- }
-
-#ifdef TRACE_FLPY_INT
- calls++;
-#endif
- if(st == 0x20)
- return IRQ_HANDLED;
- if(!(st & 0x20)) {
- virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
-#ifdef TRACE_FLPY_INT
- printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
- virtual_dma_count, virtual_dma_residue, calls, bytes,
- dma_wait);
- calls = 0;
- dma_wait=0;
-#endif
- doing_pdma = 0;
- floppy_interrupt(irq, dev_id);
- return IRQ_HANDLED;
- }
-#ifdef TRACE_FLPY_INT
- if(!virtual_dma_count)
- dma_wait++;
-#endif
- return IRQ_HANDLED;
-}
-
-static void fd_disable_dma(void)
-{
- if(! (can_use_virtual_dma & 1))
- disable_dma(FLOPPY_DMA);
- doing_pdma = 0;
- virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
-}
-
-static int vdma_request_dma(unsigned int dmanr, const char * device_id)
-{
- return 0;
-}
-
-static void vdma_nop(unsigned int dummy)
-{
-}
-
-
-static int vdma_get_dma_residue(unsigned int dummy)
-{
- return virtual_dma_count + virtual_dma_residue;
-}
-
-
-static int fd_request_irq(void)
-{
- if(can_use_virtual_dma)
- return request_irq(FLOPPY_IRQ, floppy_hardint,
- IRQF_DISABLED, "floppy", NULL);
- else
- return request_irq(FLOPPY_IRQ, floppy_interrupt,
- IRQF_DISABLED, "floppy", NULL);
-}
-
-static unsigned long dma_mem_alloc(unsigned long size)
-{
- return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
-}
-
-
-static unsigned long vdma_mem_alloc(unsigned long size)
-{
- return (unsigned long) vmalloc(size);
-
-}
-
-#define nodma_mem_alloc(size) vdma_mem_alloc(size)
-
-static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
-{
- if((unsigned long) addr >= (unsigned long) high_memory)
- vfree((void *)addr);
- else
- free_pages(addr, get_order(size));
-}
-
-#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
-
-static void _fd_chose_dma_mode(char *addr, unsigned long size)
-{
- if(can_use_virtual_dma == 2) {
- if((unsigned long) addr >= (unsigned long) high_memory ||
- isa_virt_to_bus(addr) >= 0x1000000 ||
- _CROSS_64KB(addr, size, 0))
- use_virtual_dma = 1;
- else
- use_virtual_dma = 0;
- } else {
- use_virtual_dma = can_use_virtual_dma & 1;
- }
-}
-
-#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
-
-
-static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-{
- doing_pdma = 1;
- virtual_dma_port = io;
- virtual_dma_mode = (mode == DMA_MODE_WRITE);
- virtual_dma_addr = addr;
- virtual_dma_count = size;
- virtual_dma_residue = 0;
- return 0;
-}
-
-static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
-{
-#ifdef FLOPPY_SANITY_CHECK
- if (CROSS_64KB(addr, size)) {
- printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
- return -1;
- }
-#endif
- /* actual, physical DMA */
- doing_pdma = 0;
- clear_dma_ff(FLOPPY_DMA);
- set_dma_mode(FLOPPY_DMA,mode);
- set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
- set_dma_count(FLOPPY_DMA,size);
- enable_dma(FLOPPY_DMA);
- return 0;
-}
-
-static struct fd_routine_l {
- int (*_request_dma)(unsigned int dmanr, const char * device_id);
- void (*_free_dma)(unsigned int dmanr);
- int (*_get_dma_residue)(unsigned int dummy);
- unsigned long (*_dma_mem_alloc) (unsigned long size);
- int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
-} fd_routine[] = {
- {
- request_dma,
- free_dma,
- get_dma_residue,
- dma_mem_alloc,
- hard_dma_setup
- },
- {
- vdma_request_dma,
- vdma_nop,
- vdma_get_dma_residue,
- vdma_mem_alloc,
- vdma_dma_setup
- }
-};
-
-
-static int FDC1 = 0x3f0;
-static int FDC2 = -1;
-
-/*
- * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
- * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
- * coincides with another rtc CMOS user. Paul G.
- */
-#define FLOPPY0_TYPE ({ \
- unsigned long flags; \
- unsigned char val; \
- spin_lock_irqsave(&rtc_lock, flags); \
- val = (CMOS_READ(0x10) >> 4) & 15; \
- spin_unlock_irqrestore(&rtc_lock, flags); \
- val; \
-})
-
-#define FLOPPY1_TYPE ({ \
- unsigned long flags; \
- unsigned char val; \
- spin_lock_irqsave(&rtc_lock, flags); \
- val = CMOS_READ(0x10) & 15; \
- spin_unlock_irqrestore(&rtc_lock, flags); \
- val; \
-})
-
-#define N_FDC 2
-#define N_DRIVE 8
-
-#define FLOPPY_MOTOR_MASK 0xf0
-
-#define AUTO_DMA
-
-#define EXTRA_FLOPPY_PARAMS
-
-#endif /* __ASM_X86_64_FLOPPY_H */
diff --git a/include/asm-x86_64/fpu32.h b/include/asm-x86_64/fpu32.h
deleted file mode 100644
index 4153db5..0000000
--- a/include/asm-x86_64/fpu32.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _FPU32_H
-#define _FPU32_H 1
-
-struct _fpstate_ia32;
-
-int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave);
-int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf,
- struct pt_regs *regs, int fsave);
-
-#endif
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
deleted file mode 100644
index 5cdfb08..0000000
--- a/include/asm-x86_64/futex.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#ifdef __KERNEL__
-
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile ( \
-"1: " insn "\n" \
-"2: .section .fixup,\"ax\"\n\
-3: mov %3, %1\n\
- jmp 2b\n\
- .previous\n\
- .section __ex_table,\"a\"\n\
- .align 8\n\
- .quad 1b,3b\n\
- .previous" \
- : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
- : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
-
-#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile ( \
-"1: movl %2, %0\n\
- movl %0, %3\n" \
- insn "\n" \
-"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
- jnz 1b\n\
-3: .section .fixup,\"ax\"\n\
-4: mov %5, %1\n\
- jmp 3b\n\
- .previous\n\
- .section __ex_table,\"a\"\n\
- .align 8\n\
- .quad 1b,4b,2b,4b\n\
- .previous" \
- : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
- "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret, tem;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
- uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
-
- pagefault_enable();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
-{
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- __asm__ __volatile__(
- "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
-
- "2: .section .fixup, \"ax\" \n"
- "3: mov %2, %0 \n"
- " jmp 2b \n"
- " .previous \n"
-
- " .section __ex_table, \"a\" \n"
- " .align 8 \n"
- " .quad 1b,3b \n"
- " .previous \n"
-
- : "=a" (oldval), "=m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "0" (oldval)
- : "memory"
- );
-
- return oldval;
-}
-
-#endif
-#endif
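
futex_atomic_op_inuser() above unpacks encoded_op into an operation, a comparison and two 12-bit arguments. A minimal sketch of the matching packing side, using the FUTEX_OP() encoding from <linux/futex.h> (the chosen operation is illustrative):

#include <linux/futex.h>

/* Build the encoded_op that futex_atomic_op_inuser() decodes:
 * "atomically add 1 to *uaddr and report whether the old value was 0".
 * FUTEX_OP() packs op (bits 28-31), cmp (bits 24-27), oparg (bits 12-23)
 * and cmparg (bits 0-11), matching the shifts in the decoder above. */
static int example_wake_op_arg(void)
{
	return FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0);
}
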
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
deleted file mode 100644
index d7e516c..0000000
--- a/include/asm-x86_64/genapic.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _ASM_GENAPIC_H
-#define _ASM_GENAPIC_H 1
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
- char *name;
- u32 int_delivery_mode;
- u32 int_dest_mode;
- int (*apic_id_registered)(void);
- cpumask_t (*target_cpus)(void);
- cpumask_t (*vector_allocation_domain)(int cpu);
- void (*init_apic_ldr)(void);
- /* ipi */
- void (*send_IPI_mask)(cpumask_t mask, int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
- /* */
- unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
- unsigned int (*phys_pkg_id)(int index_msb);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-
-#endif
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
deleted file mode 100644
index 95d5e09..0000000
--- a/include/asm-x86_64/hardirq.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-#include <asm/pda.h>
-#include <asm/apic.h>
-
-/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
-#define MAX_HARDIRQS_PER_CPU NR_VECTORS
-
-#define __ARCH_IRQ_STAT 1
-
-#define local_softirq_pending() read_pda(__softirq_pending)
-
-#define __ARCH_SET_SOFTIRQ_PENDING 1
-
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
-
-extern void ack_bad_irq(unsigned int irq);
-
-#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
deleted file mode 100644
index 79bb950..0000000
--- a/include/asm-x86_64/hpet.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_X8664_HPET_H
-#define _ASM_X8664_HPET_H 1
-
-#include <asm-i386/hpet.h>
-
-#define HPET_TICK_RATE (HZ * 100000UL)
-
-extern int hpet_rtc_timer_init(void);
-extern int hpet_arch_init(void);
-extern int hpet_timer_stop_set_go(unsigned long tick);
-extern int hpet_reenable(void);
-extern unsigned int hpet_calibrate_tsc(void);
-
-extern int hpet_use_timer;
-extern unsigned long hpet_period;
-extern unsigned long hpet_tick;
-
-#endif
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
deleted file mode 100644
index 09dfc18..0000000
--- a/include/asm-x86_64/hw_irq.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/*
- * linux/include/asm/hw_irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- * moved some of the old arch/i386/kernel/irq.h to here. VY
- *
- * IRQ/IPI changes taken from work by Thomas Radke
- * <tomsoft@informatik.tu-chemnitz.de>
- *
- * hacked by Andi Kleen for x86-64.
- */
-
-#ifndef __ASSEMBLY__
-#include <asm/atomic.h>
-#include <asm/irq.h>
-#include <linux/profile.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
-#endif
-
-#define NMI_VECTOR 0x02
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR 0x20
-
-#define IA32_SYSCALL_VECTOR 0x80
-
-
-/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
- */
-#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
-
-/*
- * Vectors 0x30-0x3f are used for ISA interrupts.
- */
-#define IRQ0_VECTOR FIRST_EXTERNAL_VECTOR + 0x10
-#define IRQ1_VECTOR IRQ0_VECTOR + 1
-#define IRQ2_VECTOR IRQ0_VECTOR + 2
-#define IRQ3_VECTOR IRQ0_VECTOR + 3
-#define IRQ4_VECTOR IRQ0_VECTOR + 4
-#define IRQ5_VECTOR IRQ0_VECTOR + 5
-#define IRQ6_VECTOR IRQ0_VECTOR + 6
-#define IRQ7_VECTOR IRQ0_VECTOR + 7
-#define IRQ8_VECTOR IRQ0_VECTOR + 8
-#define IRQ9_VECTOR IRQ0_VECTOR + 9
-#define IRQ10_VECTOR IRQ0_VECTOR + 10
-#define IRQ11_VECTOR IRQ0_VECTOR + 11
-#define IRQ12_VECTOR IRQ0_VECTOR + 12
-#define IRQ13_VECTOR IRQ0_VECTOR + 13
-#define IRQ14_VECTOR IRQ0_VECTOR + 14
-#define IRQ15_VECTOR IRQ0_VECTOR + 15
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- * some of the following vectors are 'rare', they are merged
- * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- * TLB, reschedule and local APIC vectors are performance-critical.
- */
-#define SPURIOUS_APIC_VECTOR 0xff
-#define ERROR_APIC_VECTOR 0xfe
-#define RESCHEDULE_VECTOR 0xfd
-#define CALL_FUNCTION_VECTOR 0xfc
-/* fb free - please don't re-add KDB here because it's useless
-   (hint - think what an NMI bit does to a vector) */
-#define THERMAL_APIC_VECTOR 0xfa
-#define THRESHOLD_APIC_VECTOR 0xf9
-/* f8 free */
-#define INVALIDATE_TLB_VECTOR_END 0xf7
-#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS 8
-
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR 0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x41 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
-
-
-#ifndef __ASSEMBLY__
-
-/* Interrupt handlers registered during init_IRQ */
-void apic_timer_interrupt(void);
-void spurious_interrupt(void);
-void error_interrupt(void);
-void reschedule_interrupt(void);
-void call_function_interrupt(void);
-void irq_move_cleanup_interrupt(void);
-void invalidate_interrupt0(void);
-void invalidate_interrupt1(void);
-void invalidate_interrupt2(void);
-void invalidate_interrupt3(void);
-void invalidate_interrupt4(void);
-void invalidate_interrupt5(void);
-void invalidate_interrupt6(void);
-void invalidate_interrupt7(void);
-void thermal_interrupt(void);
-void threshold_interrupt(void);
-void i8254_timer_resume(void);
-
-typedef int vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void __setup_vector_irq(int cpu);
-extern spinlock_t vector_lock;
-
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-extern void disable_8259A_irq(unsigned int irq);
-extern void enable_8259A_irq(unsigned int irq);
-extern int i8259A_irq_pending(unsigned int irq);
-extern void make_8259A_irq(unsigned int irq);
-extern void init_8259A(int aeoi);
-extern void send_IPI_self(int vector);
-extern void init_VISWS_APIC_irqs(void);
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-extern void print_IO_APIC(void);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-extern void send_IPI(int dest, int vector);
-extern void setup_ioapic_dest(void);
-
-extern unsigned long io_apic_irqs;
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-#include <asm/ptrace.h>
-
-#define IRQ_NAME2(nr) nr##_interrupt(void)
-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-
-/*
- * SMP has a few special interrupts for IPI messages
- */
-
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n.p2align\n" \
-"IRQ" #nr "_interrupt:\n\t" \
- "push $~(" #nr ") ; " \
- "jmp common_interrupt");
-
-#define platform_legacy_irq(irq) ((irq) < 16)
-
-#endif
-
-#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86_64/hypertransport.h b/include/asm-x86_64/hypertransport.h
deleted file mode 100644
index 5cbf9fa..0000000
--- a/include/asm-x86_64/hypertransport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/hypertransport.h>
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
deleted file mode 100644
index 0217b74..0000000
--- a/include/asm-x86_64/i387.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * include/asm-x86_64/i387.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- * Gareth Hughes <gareth@valinux.com>, May 2000
- * x86-64 work by Andi Kleen 2002
- */
-
-#ifndef __ASM_X86_64_I387_H
-#define __ASM_X86_64_I387_H
-
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/thread_info.h>
-#include <asm/uaccess.h>
-
-extern void fpu_init(void);
-extern unsigned int mxcsr_feature_mask;
-extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
-extern int save_i387(struct _fpstate __user *buf);
-extern asmlinkage void math_state_restore(void);
-
-/*
- * FPU lazy state save handling...
- */
-
-#define unlazy_fpu(tsk) do { \
- if (task_thread_info(tsk)->status & TS_USEDFPU) \
- save_init_fpu(tsk); \
- else \
- tsk->fpu_counter = 0; \
-} while (0)
-
-/* Ignore delayed exceptions from user space */
-static inline void tolerant_fwait(void)
-{
- asm volatile("1: fwait\n"
- "2:\n"
- " .section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,2b\n"
- " .previous\n");
-}
-
-#define clear_fpu(tsk) do { \
- if (task_thread_info(tsk)->status & TS_USEDFPU) { \
- tolerant_fwait(); \
- task_thread_info(tsk)->status &= ~TS_USEDFPU; \
- stts(); \
- } \
-} while (0)
-
-/*
- * ptrace request handlers...
- */
-extern int get_fpregs(struct user_i387_struct __user *buf,
- struct task_struct *tsk);
-extern int set_fpregs(struct task_struct *tsk,
- struct user_i387_struct __user *buf);
-
-/*
- * i387 state interaction
- */
-#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
-#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
-#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
-#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
-#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
-#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
-#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
-
-#define X87_FSW_ES (1 << 7) /* Exception Summary */
-
-/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
-   values. The kernel data segment can sometimes be 0 and sometimes
-   the new user value. Both should be ok.
-   Use the PDA as a safe address because it should already be in L1. */
-static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
-{
- if (unlikely(fx->swd & X87_FSW_ES))
- asm volatile("fnclex");
- alternative_input(ASM_NOP8 ASM_NOP2,
- " emms\n" /* clear stack tags */
- " fildl %%gs:0", /* load to clear state */
- X86_FEATURE_FXSAVE_LEAK);
-}
-
-static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
-{
- int err;
-
- asm volatile("1: rex64/fxrstor (%[fx])\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,3b\n"
- ".previous"
- : [err] "=r" (err)
-#if 0 /* See comment in __fxsave_clear() below. */
- : [fx] "r" (fx), "m" (*fx), "0" (0));
-#else
- : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
-#endif
- if (unlikely(err))
- init_fpu(current);
- return err;
-}
-
-static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
-{
- int err;
-
- asm volatile("1: rex64/fxsave (%[fx])\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,3b\n"
- ".previous"
- : [err] "=r" (err), "=m" (*fx)
-#if 0 /* See comment in __fxsave_clear() below. */
- : [fx] "r" (fx), "0" (0));
-#else
- : [fx] "cdaSDb" (fx), "0" (0));
-#endif
- if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
- err = -EFAULT;
- /* No need to clear here because the caller clears USED_MATH */
- return err;
-}
-
-static inline void __fxsave_clear(struct task_struct *tsk)
-{
- /* Using "rex64; fxsave %0" is broken because, if the memory operand
- uses any extended registers for addressing, a second REX prefix
- will be generated (to the assembler, rex64 followed by semicolon
- is a separate instruction), and hence the 64-bitness is lost. */
-#if 0
- /* Using "fxsaveq %0" would be the ideal choice, but is only supported
- starting with gas 2.16. */
- __asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.i387.fxsave));
-#elif 0
- /* Using, as a workaround, the properly prefixed form below isn't
- accepted by any binutils version so far released, complaining that
- the same type of prefix is used twice if an extended register is
- needed for addressing (fix submitted to mainline 2005-11-21). */
- __asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.i387.fxsave));
-#else
- /* This, however, we can work around by forcing the compiler to select
- an addressing mode that doesn't require extended registers. */
- __asm__ __volatile__("rex64/fxsave %P2(%1)"
- : "=m" (tsk->thread.i387.fxsave)
- : "cdaSDb" (tsk),
- "i" (offsetof(__typeof__(*tsk),
- thread.i387.fxsave)));
-#endif
- clear_fpu_state(&tsk->thread.i387.fxsave);
-}
-
-static inline void kernel_fpu_begin(void)
-{
- struct thread_info *me = current_thread_info();
- preempt_disable();
- if (me->status & TS_USEDFPU) {
- __fxsave_clear(me->task);
- me->status &= ~TS_USEDFPU;
- return;
- }
- clts();
-}
-
-static inline void kernel_fpu_end(void)
-{
- stts();
- preempt_enable();
-}
-
-static inline void save_init_fpu(struct task_struct *tsk)
-{
- __fxsave_clear(tsk);
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
- stts();
-}
-
-/*
- * This restores directly out of user space. Exceptions are handled.
- */
-static inline int restore_i387(struct _fpstate __user *buf)
-{
- return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
-}
-
-#endif /* __ASM_X86_64_I387_H */
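
kernel_fpu_begin()/kernel_fpu_end() above bracket any in-kernel use of FPU or SSE registers. A minimal sketch, assuming a code path that wants to issue one SSE store (the asm body is illustrative):

#include <asm/i387.h>

/* kernel_fpu_begin() saves any live user FPU state and clears CR0.TS;
 * kernel_fpu_end() sets TS again so the user state is reloaded lazily. */
static void clear_16_bytes_with_sse(void *dst)
{
	kernel_fpu_begin();
	asm volatile("pxor %%xmm0, %%xmm0\n\t"
		     "movdqu %%xmm0, (%0)"
		     : : "r" (dst) : "memory");
	kernel_fpu_end();
}
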
diff --git a/include/asm-x86_64/i8253.h b/include/asm-x86_64/i8253.h
deleted file mode 100644
index 015d8df..0000000
--- a/include/asm-x86_64/i8253.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_I8253_H__
-#define __ASM_I8253_H__
-
-extern spinlock_t i8253_lock;
-
-#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
deleted file mode 100644
index 0190b7c..0000000
--- a/include/asm-x86_64/ia32.h
+++ /dev/null
@@ -1,178 +0,0 @@
-#ifndef _ASM_X86_64_IA32_H
-#define _ASM_X86_64_IA32_H
-
-
-#ifdef CONFIG_IA32_EMULATION
-
-#include <linux/compat.h>
-
-/*
- * 32 bit structures for IA32 support.
- */
-
-#include <asm/sigcontext32.h>
-
-/* signal.h */
-struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
-};
-
-struct old_sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
-};
-
-typedef struct sigaltstack_ia32 {
- unsigned int ss_sp;
- int ss_flags;
- unsigned int ss_size;
-} stack_ia32_t;
-
-struct ucontext_ia32 {
- unsigned int uc_flags;
- unsigned int uc_link;
- stack_ia32_t uc_stack;
- struct sigcontext_ia32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-/* This matches struct stat64 in glibc2.2, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
- unsigned long long st_dev;
- unsigned char __pad0[4];
-
-#define STAT64_HAS_BROKEN_ST_INO 1
- unsigned int __st_ino;
-
- unsigned int st_mode;
- unsigned int st_nlink;
-
- unsigned int st_uid;
- unsigned int st_gid;
-
- unsigned long long st_rdev;
- unsigned char __pad3[4];
-
- long long st_size;
- unsigned int st_blksize;
-
-	long long	st_blocks;/* Number of 512-byte blocks allocated. */
-
- unsigned st_atime;
- unsigned st_atime_nsec;
- unsigned st_mtime;
- unsigned st_mtime_nsec;
- unsigned st_ctime;
- unsigned st_ctime_nsec;
-
- unsigned long long st_ino;
-} __attribute__((packed));
-
-typedef struct compat_siginfo{
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[((128/sizeof(int)) - 3)];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
-struct sigframe32
-{
- u32 pretcode;
- int sig;
- struct sigcontext_ia32 sc;
- struct _fpstate_ia32 fpstate;
- unsigned int extramask[_COMPAT_NSIG_WORDS-1];
-};
-
-struct rt_sigframe32
-{
- u32 pretcode;
- int sig;
- u32 pinfo;
- u32 puc;
- compat_siginfo_t info;
- struct ucontext_ia32 uc;
- struct _fpstate_ia32 fpstate;
-};
-
-struct ustat32 {
- __u32 f_tfree;
- compat_ino_t f_tinode;
- char f_fname[6];
- char f_fpack[6];
-};
-
-#define IA32_STACK_TOP IA32_PAGE_OFFSET
-
-#ifdef __KERNEL__
-struct user_desc;
-struct siginfo_t;
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
-
-struct linux_binprm;
-extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
- unsigned long stack_top, int exec_stack);
-struct mm_struct;
-extern void ia32_pick_mmap_layout(struct mm_struct *mm);
-
-#endif
-
-#endif /* CONFIG_IA32_EMULATION */
-
-#endif
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
deleted file mode 100644
index 5b52ce5..0000000
--- a/include/asm-x86_64/ia32_unistd.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_X86_64_IA32_UNISTD_H_
-#define _ASM_X86_64_IA32_UNISTD_H_
-
-/*
- * This file contains the system call numbers of the ia32 port;
- * this is for the kernel only.
- * Only add syscalls here where some part of the kernel needs to know
- * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
- */
-
-#define __NR_ia32_restart_syscall 0
-#define __NR_ia32_exit 1
-#define __NR_ia32_read 3
-#define __NR_ia32_write 4
-#define __NR_ia32_sigreturn 119
-#define __NR_ia32_rt_sigreturn 173
-
-#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/ide.h b/include/asm-x86_64/ide.h
deleted file mode 100644
index 4cef0ef..0000000
--- a/include/asm-x86_64/ide.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/ide.h>
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
deleted file mode 100644
index 6bd47dc..0000000
--- a/include/asm-x86_64/idle.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _ASM_X86_64_IDLE_H
-#define _ASM_X86_64_IDLE_H 1
-
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
-void enter_idle(void);
-void exit_idle(void);
-
-#endif
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
deleted file mode 100644
index 8633331..0000000
--- a/include/asm-x86_64/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef X86_64_INTEL_ARCH_PERFMON_H
-#define X86_64_INTEL_ARCH_PERFMON_H 1
-
-#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
- (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
-
-union cpuid10_eax {
- struct {
- unsigned int version_id:8;
- unsigned int num_counters:8;
- unsigned int bit_width:8;
- unsigned int mask_length:8;
- } split;
- unsigned int full;
-};
-
-#endif /* X86_64_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
deleted file mode 100644
index 7475095..0000000
--- a/include/asm-x86_64/io.h
+++ /dev/null
@@ -1,276 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
- * Bit simplified and optimized by Jan Hubicka
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
- *
- * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
- * isa_read[wl] and isa_write[wl] fixed
- * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- */
-
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#define IO_SPACE_LIMIT 0xffff
-
-#if defined(__KERNEL__) && defined(__x86_64__)
-
-#include <linux/vmalloc.h>
-
-#ifndef __i386__
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-#endif
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-#include <asm-generic/iomap.h>
-
-extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-extern void *early_ioremap(unsigned long addr, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
-
-/*
- * This one maps high address device memory and turns off caching for that area.
- * It's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-static inline __u8 __readb(const volatile void __iomem *addr)
-{
- return *(__force volatile __u8 *)addr;
-}
-static inline __u16 __readw(const volatile void __iomem *addr)
-{
- return *(__force volatile __u16 *)addr;
-}
-static __always_inline __u32 __readl(const volatile void __iomem *addr)
-{
- return *(__force volatile __u32 *)addr;
-}
-static inline __u64 __readq(const volatile void __iomem *addr)
-{
- return *(__force volatile __u64 *)addr;
-}
-#define readb(x) __readb(x)
-#define readw(x) __readw(x)
-#define readl(x) __readl(x)
-#define readq(x) __readq(x)
-#define readb_relaxed(a) readb(a)
-#define readw_relaxed(a) readw(a)
-#define readl_relaxed(a) readl(a)
-#define readq_relaxed(a) readq(a)
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_readq readq
-
-#define mmiowb()
-
-static inline void __writel(__u32 b, volatile void __iomem *addr)
-{
- *(__force volatile __u32 *)addr = b;
-}
-static inline void __writeq(__u64 b, volatile void __iomem *addr)
-{
- *(__force volatile __u64 *)addr = b;
-}
-static inline void __writeb(__u8 b, volatile void __iomem *addr)
-{
- *(__force volatile __u8 *)addr = b;
-}
-static inline void __writew(__u16 b, volatile void __iomem *addr)
-{
- *(__force volatile __u16 *)addr = b;
-}
-#define writeq(val,addr) __writeq((val),(addr))
-#define writel(val,addr) __writel((val),(addr))
-#define writew(val,addr) __writew((val),(addr))
-#define writeb(val,addr) __writeb((val),(addr))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define __raw_writeq writeq
-
-void __memcpy_fromio(void*,unsigned long,unsigned);
-void __memcpy_toio(unsigned long,const void*,unsigned);
-
-static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
-{
- __memcpy_fromio(to,(unsigned long)from,len);
-}
-static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
-{
- __memcpy_toio((unsigned long)to,from,len);
-}
-
-void memset_io(volatile void __iomem *a, int b, size_t c);
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-#define flush_write_buffers()
-
-extern int iommu_bio_merge;
-#define BIO_VMERGE_BOUNDARY iommu_bio_merge
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-#endif /* __KERNEL__ */
-
-#endif
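
ioremap_nocache() together with readl()/writel() above is the usual pattern for touching memory-mapped device registers. A minimal sketch, assuming a driver that already knows a BAR's physical address and size (the 0x10 register offset is illustrative):

#include <linux/types.h>
#include <asm/io.h>

/* Map a device's register window uncached, poke one register, unmap. */
static u32 poke_ctrl_reg(unsigned long bar_phys, unsigned long bar_len)
{
	void __iomem *regs = ioremap_nocache(bar_phys, bar_len);
	u32 val;

	if (!regs)
		return 0;

	val = readl(regs + 0x10);
	writel(val | 0x1, regs + 0x10);
	iounmap(regs);

	return val;
}
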
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
deleted file mode 100644
index d9f2e54..0000000
--- a/include/asm-x86_64/io_apic.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef __ASM_IO_APIC_H
-#define __ASM_IO_APIC_H
-
-#include <asm/types.h>
-#include <asm/mpspec.h>
-#include <asm/apicdef.h>
-
-/*
- * Intel IO-APIC support for SMP and UP systems.
- *
- * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
- */
-
-#define APIC_MISMATCH_DEBUG
-
-/*
- * The structure of the IO-APIC:
- */
-union IO_APIC_reg_00 {
- u32 raw;
- struct {
- u32 __reserved_2 : 14,
- LTS : 1,
- delivery_type : 1,
- __reserved_1 : 8,
- ID : 8;
- } __attribute__ ((packed)) bits;
-};
-
-union IO_APIC_reg_01 {
- u32 raw;
- struct {
- u32 version : 8,
- __reserved_2 : 7,
- PRQ : 1,
- entries : 8,
- __reserved_1 : 8;
- } __attribute__ ((packed)) bits;
-};
-
-union IO_APIC_reg_02 {
- u32 raw;
- struct {
- u32 __reserved_2 : 24,
- arbitration : 4,
- __reserved_1 : 4;
- } __attribute__ ((packed)) bits;
-};
-
-union IO_APIC_reg_03 {
- u32 raw;
- struct {
- u32 boot_DT : 1,
- __reserved_1 : 31;
- } __attribute__ ((packed)) bits;
-};
-
-/*
- * # of IO-APICs and # of IRQ routing registers
- */
-extern int nr_ioapics;
-extern int nr_ioapic_registers[MAX_IO_APICS];
-
-enum ioapic_irq_destination_types {
- dest_Fixed = 0,
- dest_LowestPrio = 1,
- dest_SMI = 2,
- dest__reserved_1 = 3,
- dest_NMI = 4,
- dest_INIT = 5,
- dest__reserved_2 = 6,
- dest_ExtINT = 7
-};
-
-struct IO_APIC_route_entry {
- __u32 vector : 8,
- delivery_mode : 3, /* 000: FIXED
- * 001: lowest prio
- * 111: ExtINT
- */
- dest_mode : 1, /* 0: physical, 1: logical */
- delivery_status : 1,
- polarity : 1,
- irr : 1,
- trigger : 1, /* 0: edge, 1: level */
- mask : 1, /* 0: enabled, 1: disabled */
- __reserved_2 : 15;
-
- __u32 __reserved_3 : 24,
- dest : 8;
-} __attribute__ ((packed));
-
-/*
- * MP-BIOS irq configuration table structures:
- */
-
-/* I/O APIC entries */
-extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-
-/* # of MP IRQ source entries */
-extern int mp_irq_entries;
-
-/* MP IRQ source entries */
-extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* non-0 if default (table-less) MP configuration */
-extern int mpc_default_type;
-
-/* 1 if "noapic" boot option passed */
-extern int skip_ioapic_setup;
-
-static inline void disable_ioapic_setup(void)
-{
- skip_ioapic_setup = 1;
-}
-
-
-/*
- * If we use the IO-APIC for IRQ routing, disable automatic
- * assignment of PCI IRQ's.
- */
-#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-
-#ifdef CONFIG_ACPI
-extern int io_apic_get_version (int ioapic);
-extern int io_apic_get_redir_entries (int ioapic);
-extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
-#endif
-
-extern int sis_apic_bug; /* dummy */
-
-void enable_NMI_through_LVT0 (void * dummy);
-
-extern spinlock_t i8259A_lock;
-
-#endif
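
The IO_APIC_route_entry bitfields above define the layout of a 64-bit
IO-APIC redirection-table entry. As a quick illustration of that layout,
here is a small stand-alone C sketch (user space only; the sample values
are made up) that packs and decodes the same fields from a raw 64-bit word:

#include <stdint.h>
#include <stdio.h>

/* Bit positions mirror struct IO_APIC_route_entry above. */
static uint64_t make_rte(uint8_t vector, unsigned trigger_level,
                         unsigned masked, uint8_t dest_apic)
{
    uint64_t e = 0;

    e |= vector;                                /* bits 0-7: vector */
    e |= (uint64_t)(trigger_level & 1) << 15;   /* bit 15: 0 edge, 1 level */
    e |= (uint64_t)(masked & 1) << 16;          /* bit 16: 0 enabled, 1 masked */
    e |= (uint64_t)dest_apic << 56;             /* bits 56-63: destination APIC */
    return e;
}

int main(void)
{
    uint64_t rte = make_rte(0x31, 1, 0, 0x02);  /* hypothetical values */

    printf("entry=%#018llx vector=%#x trigger=%llu mask=%llu dest=%#llx\n",
           (unsigned long long)rte,
           (unsigned)(rte & 0xff),
           (unsigned long long)((rte >> 15) & 1),
           (unsigned long long)((rte >> 16) & 1),
           (unsigned long long)(rte >> 56));
    return 0;
}
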
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
deleted file mode 100644
index b279fe0..0000000
--- a/include/asm-x86_64/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/include/asm-x86_64/ioctls.h b/include/asm-x86_64/ioctls.h
deleted file mode 100644
index 3fc0b15..0000000
--- a/include/asm-x86_64/ioctls.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __ARCH_X8664_IOCTLS_H__
-#define __ARCH_X8664_IOCTLS_H__
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T',0x2A, struct termios2)
-#define TCSETS2 _IOW('T',0x2B, struct termios2)
-#define TCSETSW2 _IOW('T',0x2C, struct termios2)
-#define TCSETSF2 _IOW('T',0x2D, struct termios2)
-#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
-
-#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
-#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
-#define FIOQSIZE 0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif
diff --git a/include/asm-x86_64/iommu.h b/include/asm-x86_64/iommu.h
deleted file mode 100644
index 5af471f..0000000
--- a/include/asm-x86_64/iommu.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_X8664_IOMMU_H
-#define _ASM_X8664_IOMMU_H 1
-
-extern void pci_iommu_shutdown(void);
-extern void no_iommu_init(void);
-extern int force_iommu, no_iommu;
-extern int iommu_detected;
-#ifdef CONFIG_IOMMU
-extern void gart_iommu_init(void);
-extern void gart_iommu_shutdown(void);
-extern void __init gart_parse_options(char *);
-extern void iommu_hole_init(void);
-extern int fallback_aper_order;
-extern int fallback_aper_force;
-extern int iommu_aperture;
-extern int iommu_aperture_allowed;
-extern int iommu_aperture_disabled;
-extern int fix_aperture;
-#else
-#define iommu_aperture 0
-#define iommu_aperture_allowed 0
-
-static inline void gart_iommu_shutdown(void)
-{
-}
-
-#endif
-
-#endif
diff --git a/include/asm-x86_64/ipcbuf.h b/include/asm-x86_64/ipcbuf.h
deleted file mode 100644
index 470cf85..0000000
--- a/include/asm-x86_64/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __x86_64_IPCBUF_H__
-#define __x86_64_IPCBUF_H__
-
-/*
- * The ipc64_perm structure for x86_64 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __x86_64_IPCBUF_H__ */
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
deleted file mode 100644
index a7c75ea..0000000
--- a/include/asm-x86_64/ipi.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef __ASM_IPI_H
-#define __ASM_IPI_H
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC InterProcessor Interrupt code.
- *
- * Moved to include file by James Cleverdon from
- * arch/x86-64/kernel/smp.c
- *
- * Copyrights from kernel/smp.c:
- *
- * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
- * (c) 2002,2003 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License, v.2
- */
-
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
-{
- unsigned int icr = shortcut | dest;
-
- switch (vector) {
- default:
- icr |= APIC_DM_FIXED | vector;
- break;
- case NMI_VECTOR:
- icr |= APIC_DM_NMI;
- break;
- }
- return icr;
-}
-
-static inline int __prepare_ICR2 (unsigned int mask)
-{
- return SET_APIC_DEST_FIELD(mask);
-}
-
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
-{
- /*
- * Subtle. In the case of the 'never do double writes' workaround
- * we have to lock out interrupts to be safe. As we don't care
- * about the value read, we use an atomic rmw access to avoid costly
- * cli/sti. Otherwise we use an even cheaper single atomic write
- * to the APIC.
- */
- unsigned int cfg;
-
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
-
- /*
- * No need to touch the target chip field
- */
- cfg = __prepare_ICR(shortcut, vector, dest);
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
-{
- unsigned long cfg;
-
- /*
- * Wait for idle.
- */
- if (unlikely(vector == NMI_VECTOR))
- safe_apic_wait_icr_idle();
- else
- apic_wait_icr_idle();
-
- /*
- * prepare target chip field
- */
- cfg = __prepare_ICR2(mask);
- apic_write(APIC_ICR2, cfg);
-
- /*
- * program the ICR
- */
- cfg = __prepare_ICR(0, vector, dest);
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
-}
-
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
-{
- unsigned long flags;
- unsigned long query_cpu;
-
- /*
- * Hack. The clustered APIC addressing mode doesn't allow us to send
- * to an arbitrary mask, so I do a unicast to each CPU instead.
- * - mbligh
- */
- local_irq_save(flags);
- for_each_cpu_mask(query_cpu, mask) {
- __send_IPI_dest_field(x86_cpu_to_apicid[query_cpu],
- vector, APIC_DEST_PHYSICAL);
- }
- local_irq_restore(flags);
-}
-
-#endif /* __ASM_IPI_H */
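
__prepare_ICR() above simply ORs the destination shorthand, delivery mode and
vector into the low ICR word. A minimal stand-alone sketch of that
composition (user-space C; the APIC_* constants are copied here only for
illustration and are assumed to mirror asm/apicdef.h, NMI_VECTOR to mirror
asm/hw_irq.h):

#include <stdio.h>

#define APIC_DM_FIXED      0x00000u
#define APIC_DM_NMI        0x00400u
#define APIC_DEST_ALLBUT   0xC0000u   /* "all but self" shorthand */
#define NMI_VECTOR         0x02

static unsigned int prepare_icr(unsigned int shortcut, int vector,
                                unsigned int dest)
{
    unsigned int icr = shortcut | dest;

    if (vector == NMI_VECTOR)
        icr |= APIC_DM_NMI;              /* NMI delivery ignores the vector */
    else
        icr |= APIC_DM_FIXED | vector;   /* fixed delivery, explicit vector */
    return icr;
}

int main(void)
{
    /* Vector 0xfb to all CPUs but self, physical destination mode (0). */
    printf("ICR low word = %#x\n", prepare_icr(APIC_DEST_ALLBUT, 0xfb, 0));
    return 0;
}
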
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
deleted file mode 100644
index 5006c6e..0000000
--- a/include/asm-x86_64/irq.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- * linux/include/asm/irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- * IRQ/IPI changes taken from work by Thomas Radke
- * <tomsoft@informatik.tu-chemnitz.de>
- */
-
-#define TIMER_IRQ 0
-
-/*
- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
- * Right now the APIC is mostly only used for SMP.
- * 256 vectors is an architectural limit. (we can have
- * more than 256 devices theoretically, but they will
- * have to use shared interrupts)
- * Since vectors 0x00-0x1f are used/reserved for the CPU,
- * the usable vector space is 0x20-0xff (224 vectors)
- */
-
-/*
- * The maximum number of vectors supported by x86_64 processors
- * is limited to 256. For processors other than x86_64, NR_VECTORS
- * should be changed accordingly.
- */
-#define NR_VECTORS 256
-
-#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-
-#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
-#define NR_IRQ_VECTORS NR_IRQS
-
-static __inline__ int irq_canonicalize(int irq)
-{
- return ((irq == 2) ? 9 : irq);
-}
-
-#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
-
-#ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
-#endif
-
-#define __ARCH_HAS_DO_SOFTIRQ 1
-
-#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86_64/irq_regs.h b/include/asm-x86_64/irq_regs.h
deleted file mode 100644
index 3dd9c0b..0000000
--- a/include/asm-x86_64/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
deleted file mode 100644
index 86e70fe..0000000
--- a/include/asm-x86_64/irqflags.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * include/asm-x86_64/irqflags.h
- *
- * IRQ flags handling
- *
- * This file gets included from lowlevel asm headers too, to provide
- * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() functions from the lowlevel headers.
- */
-#ifndef _ASM_IRQFLAGS_H
-#define _ASM_IRQFLAGS_H
-#include <asm/processor-flags.h>
-
-#ifndef __ASSEMBLY__
-/*
- * Interrupt control:
- */
-
-static inline unsigned long __raw_local_save_flags(void)
-{
- unsigned long flags;
-
- __asm__ __volatile__(
- "# __raw_save_flags\n\t"
- "pushfq ; popq %q0"
- : "=g" (flags)
- : /* no input */
- : "memory"
- );
-
- return flags;
-}
-
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
- __asm__ __volatile__(
- "pushq %0 ; popfq"
- : /* no output */
- :"g" (flags)
- :"memory", "cc"
- );
-}
-
-#ifdef CONFIG_X86_VSMP
-
-/*
- * Interrupt control for the VSMP architecture:
- */
-
-static inline void raw_local_irq_disable(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-
-static inline void raw_local_irq_enable(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-}
-
-#else /* CONFIG_X86_VSMP */
-
-static inline void raw_local_irq_disable(void)
-{
- __asm__ __volatile__("cli" : : : "memory");
-}
-
-static inline void raw_local_irq_enable(void)
-{
- __asm__ __volatile__("sti" : : : "memory");
-}
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & X86_EFLAGS_IF);
-}
-
-#endif
-
-/*
- * For spinlocks, etc.:
- */
-
-static inline unsigned long __raw_local_irq_save(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_disable();
-
- return flags;
-}
-
-#define raw_local_irq_save(flags) \
- do { (flags) = __raw_local_irq_save(); } while (0)
-
-static inline int raw_irqs_disabled(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- return raw_irqs_disabled_flags(flags);
-}
-
-/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static inline void raw_safe_halt(void)
-{
- __asm__ __volatile__("sti; hlt" : : : "memory");
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static inline void halt(void)
-{
- __asm__ __volatile__("hlt": : :"memory");
-}
-
-#else /* __ASSEMBLY__: */
-# ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
-# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
-# else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-# endif
-#endif
-
-#endif
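
On the non-VSMP side, raw_irqs_disabled_flags() reduces to testing the IF
bit in a saved RFLAGS word. A small user-space sketch of the same pushfq
pattern (GCC inline asm, x86-64 only; in user mode IF is normally set, so
this should report interrupts enabled):

#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200UL   /* value from asm/processor-flags.h */

static unsigned long save_flags(void)
{
    unsigned long flags;

    __asm__ __volatile__("pushfq ; popq %0" : "=g" (flags) : : "memory");
    return flags;
}

int main(void)
{
    unsigned long flags = save_flags();

    printf("RFLAGS=%#lx, IRQs %s\n", flags,
           (flags & X86_EFLAGS_IF) ? "enabled" : "disabled");
    return 0;
}
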
diff --git a/include/asm-x86_64/ist.h b/include/asm-x86_64/ist.h
deleted file mode 100644
index 338857e..0000000
--- a/include/asm-x86_64/ist.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/ist.h>
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
deleted file mode 100644
index 699dd69..0000000
--- a/include/asm-x86_64/k8.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _ASM_K8_H
-#define _ASM_K8_H 1
-
-#include <linux/pci.h>
-
-extern struct pci_device_id k8_nb_ids[];
-
-extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-
-#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
deleted file mode 100644
index d7e2bcf..0000000
--- a/include/asm-x86_64/kdebug.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _X86_64_KDEBUG_H
-#define _X86_64_KDEBUG_H 1
-
-#include <linux/notifier.h>
-
-struct pt_regs;
-
-extern int register_page_fault_notifier(struct notifier_block *);
-extern int unregister_page_fault_notifier(struct notifier_block *);
-
-/* Grossly misnamed. */
-enum die_val {
- DIE_OOPS = 1,
- DIE_INT3,
- DIE_DEBUG,
- DIE_PANIC,
- DIE_NMI,
- DIE_DIE,
- DIE_NMIWATCHDOG,
- DIE_KERNELDEBUG,
- DIE_TRAP,
- DIE_GPF,
- DIE_CALL,
- DIE_NMI_IPI,
- DIE_PAGE_FAULT,
-};
-
-extern void printk_address(unsigned long address);
-extern void die(const char *,struct pt_regs *,long);
-extern void __die(const char *,struct pt_regs *,long);
-extern void show_registers(struct pt_regs *regs);
-extern void dump_pagetable(unsigned long);
-extern unsigned long oops_begin(void);
-extern void oops_end(unsigned long);
-
-#endif
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
deleted file mode 100644
index 738e581..0000000
--- a/include/asm-x86_64/kexec.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef _X86_64_KEXEC_H
-#define _X86_64_KEXEC_H
-
-#define PA_CONTROL_PAGE 0
-#define VA_CONTROL_PAGE 1
-#define PA_PGD 2
-#define VA_PGD 3
-#define PA_PUD_0 4
-#define VA_PUD_0 5
-#define PA_PMD_0 6
-#define VA_PMD_0 7
-#define PA_PTE_0 8
-#define VA_PTE_0 9
-#define PA_PUD_1 10
-#define VA_PUD_1 11
-#define PA_PMD_1 12
-#define VA_PMD_1 13
-#define PA_PTE_1 14
-#define VA_PTE_1 15
-#define PA_TABLE_PAGE 16
-#define PAGES_NR 17
-
-#ifndef __ASSEMBLY__
-
-#include <linux/string.h>
-
-#include <asm/page.h>
-#include <asm/ptrace.h>
-
-/*
- * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
- * i.e. the highest page that is mapped directly into kernel memory,
- * so that kmap is not required.
- *
- * So far x86_64 is limited to 40 physical address bits.
- */
-
-/* Maximum physical address we can use pages from */
-#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
-/* Maximum address we can reach in physical address mode */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
-/* Maximum address we can use for the control pages */
-#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
-
-/* Allocate one page for the pdp and the second for the code */
-#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL)
-
-/* The native architecture */
-#define KEXEC_ARCH KEXEC_ARCH_X86_64
-
-/*
- * Saving the registers of the cpu on which panic occurred in
- * crash_kexec to save a valid sp. The registers of other cpus
- * will be saved in machine_crash_shutdown while shutting them down.
- */
-
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs)
-{
- if (oldregs)
- memcpy(newregs, oldregs, sizeof(*newregs));
- else {
- __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
- __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
- __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
- __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
- __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
- __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
- __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
- __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
- __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
- __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
- __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
- __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
- __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
- __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
- __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
- __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
- __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
- __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
- __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
-
- newregs->rip = (unsigned long)current_text_addr();
- }
-}
-
-NORET_TYPE void
-relocate_kernel(unsigned long indirection_page,
- unsigned long page_list,
- unsigned long start_address) ATTRIB_NORET;
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _X86_64_KEXEC_H */
diff --git a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h
deleted file mode 100644
index 7486338..0000000
--- a/include/asm-x86_64/kmap_types.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
-
-#endif
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
deleted file mode 100644
index 7db8254..0000000
--- a/include/asm-x86_64/kprobes.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _ASM_KPROBES_H
-#define _ASM_KPROBES_H
-/*
- * Kernel Probes (KProbes)
- * include/asm-x86_64/kprobes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- *
- * 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston
- * kenistoj@us.ibm.com adopted from i386.
- */
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/percpu.h>
-
-#define __ARCH_WANT_KPROBES_INSN_SLOT
-
-struct pt_regs;
-struct kprobe;
-
-typedef u8 kprobe_opcode_t;
-#define BREAKPOINT_INSTRUCTION 0xcc
-#define MAX_INSN_SIZE 15
-#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
-
-#define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 1
-
-void kretprobe_trampoline(void);
-extern void arch_remove_kprobe(struct kprobe *p);
-#define flush_insn_slot(p) do { } while (0)
-
-/* Architecture specific copy of original instruction*/
-struct arch_specific_insn {
- /* copy of the original instruction */
- kprobe_opcode_t *insn;
-};
-
-struct prev_kprobe {
- struct kprobe *kp;
- unsigned long status;
- unsigned long old_rflags;
- unsigned long saved_rflags;
-};
-
-/* per-cpu kprobe control block */
-struct kprobe_ctlblk {
- unsigned long kprobe_status;
- unsigned long kprobe_old_rflags;
- unsigned long kprobe_saved_rflags;
- long *jprobe_saved_rsp;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
- struct prev_kprobe prev_kprobe;
-};
-
-/* trap3/1 are intr gates for kprobes. So, restore the status of IF,
- * if necessary, before executing the original int3/1 (trap) handler.
- */
-static inline void restore_interrupts(struct pt_regs *regs)
-{
- if (regs->eflags & IF_MASK)
- local_irq_enable();
-}
-
-extern int post_kprobe_handler(struct pt_regs *regs);
-extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
-extern int kprobe_handler(struct pt_regs *regs);
-
-extern int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-#endif /* _ASM_KPROBES_H */
diff --git a/include/asm-x86_64/ldt.h b/include/asm-x86_64/ldt.h
deleted file mode 100644
index 9ef647b..0000000
--- a/include/asm-x86_64/ldt.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * ldt.h
- *
- * Definitions of structures used with the modify_ldt system call.
- */
-#ifndef _LINUX_LDT_H
-#define _LINUX_LDT_H
-
-/* Maximum number of LDT entries supported. */
-#define LDT_ENTRIES 8192
-/* The size of each LDT entry. */
-#define LDT_ENTRY_SIZE 8
-
-#ifndef __ASSEMBLY__
-/* Note: on 64bit, base and limit are ignored, and you cannot set
-   DS/ES/CS to non-default values if you still want to do syscalls. This
-   call is therefore mainly useful for 32bit mode. */
-struct user_desc {
- unsigned int entry_number;
- unsigned int base_addr;
- unsigned int limit;
- unsigned int seg_32bit:1;
- unsigned int contents:2;
- unsigned int read_exec_only:1;
- unsigned int limit_in_pages:1;
- unsigned int seg_not_present:1;
- unsigned int useable:1;
- unsigned int lm:1;
-};
-
-#define MODIFY_LDT_CONTENTS_DATA 0
-#define MODIFY_LDT_CONTENTS_STACK 1
-#define MODIFY_LDT_CONTENTS_CODE 2
-
-#endif /* !__ASSEMBLY__ */
-#endif
diff --git a/include/asm-x86_64/linkage.h b/include/asm-x86_64/linkage.h
deleted file mode 100644
index b5f39d0..0000000
--- a/include/asm-x86_64/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-#define __ALIGN .p2align 4,,15
-
-#endif
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
deleted file mode 100644
index e87492b..0000000
--- a/include/asm-x86_64/local.h
+++ /dev/null
@@ -1,222 +0,0 @@
-#ifndef _ARCH_X8664_LOCAL_H
-#define _ARCH_X8664_LOCAL_H
-
-#include <linux/percpu.h>
-#include <asm/atomic.h>
-
-typedef struct
-{
- atomic_long_t a;
-} local_t;
-
-#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
-#define local_read(l) atomic_long_read(&(l)->a)
-#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-
-static inline void local_inc(local_t *l)
-{
- __asm__ __volatile__(
- "incq %0"
- :"=m" (l->a.counter)
- :"m" (l->a.counter));
-}
-
-static inline void local_dec(local_t *l)
-{
- __asm__ __volatile__(
- "decq %0"
- :"=m" (l->a.counter)
- :"m" (l->a.counter));
-}
-
-static inline void local_add(long i, local_t *l)
-{
- __asm__ __volatile__(
- "addq %1,%0"
- :"=m" (l->a.counter)
- :"ir" (i), "m" (l->a.counter));
-}
-
-static inline void local_sub(long i, local_t *l)
-{
- __asm__ __volatile__(
- "subq %1,%0"
- :"=m" (l->a.counter)
- :"ir" (i), "m" (l->a.counter));
-}
-
-/**
- * local_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @l: pointer to type local_t
- *
- * Atomically subtracts @i from @l and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int local_sub_and_test(long i, local_t *l)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- "subq %2,%0; sete %1"
- :"=m" (l->a.counter), "=qm" (c)
- :"ir" (i), "m" (l->a.counter) : "memory");
- return c;
-}
-
-/**
- * local_dec_and_test - decrement and test
- * @l: pointer to type local_t
- *
- * Atomically decrements @l by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int local_dec_and_test(local_t *l)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- "decq %0; sete %1"
- :"=m" (l->a.counter), "=qm" (c)
- :"m" (l->a.counter) : "memory");
- return c != 0;
-}
-
-/**
- * local_inc_and_test - increment and test
- * @l: pointer to type local_t
- *
- * Atomically increments @l by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int local_inc_and_test(local_t *l)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- "incq %0; sete %1"
- :"=m" (l->a.counter), "=qm" (c)
- :"m" (l->a.counter) : "memory");
- return c != 0;
-}
-
-/**
- * local_add_negative - add and test if negative
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int local_add_negative(long i, local_t *l)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- "addq %2,%0; sets %1"
- :"=m" (l->a.counter), "=qm" (c)
- :"ir" (i), "m" (l->a.counter) : "memory");
- return c;
-}
-
-/**
- * local_add_return - add and return
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns @i + @l
- */
-static __inline__ long local_add_return(long i, local_t *l)
-{
- long __i = i;
- __asm__ __volatile__(
- "xaddq %0, %1;"
- :"+r" (i), "+m" (l->a.counter)
- : : "memory");
- return i + __i;
-}
-
-static __inline__ long local_sub_return(long i, local_t *l)
-{
- return local_add_return(-i,l);
-}
-
-#define local_inc_return(l) (local_add_return(1,l))
-#define local_dec_return(l) (local_sub_return(1,l))
-
-#define local_cmpxchg(l, o, n) \
- (cmpxchg_local(&((l)->a.counter), (o), (n)))
-/* Always has a lock prefix */
-#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
-
-/**
- * local_add_unless - add unless the number is a given value
- * @l: pointer of type local_t
- * @a: the amount to add to l...
- * @u: ...unless l is equal to u.
- *
- * Atomically adds @a to @l, so long as it was not @u.
- * Returns non-zero if @l was not @u, and zero otherwise.
- */
-#define local_add_unless(l, a, u) \
-({ \
- long c, old; \
- c = local_read(l); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = local_cmpxchg((l), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
-#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
-
-/* On x86-64 these are better than the atomic variants on SMP kernels
-   because they don't use a lock prefix. */
-#define __local_inc(l) local_inc(l)
-#define __local_dec(l) local_dec(l)
-#define __local_add(i,l) local_add((i),(l))
-#define __local_sub(i,l) local_sub((i),(l))
-
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non-atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
-#endif /* _ARCH_X8664_LOCAL_H */
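
local_add_unless() above is the usual compare-and-swap retry loop. The same
idea in portable C11 atomics, as a user-space sketch rather than the kernel
implementation:

#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v equals 'u'; return non-zero if the add happened. */
static int add_unless(atomic_long *v, long a, long u)
{
    long c = atomic_load(v);

    for (;;) {
        if (c == u)
            return 0;
        if (atomic_compare_exchange_weak(v, &c, c + a))
            return 1;
        /* c was refreshed with the current value; try again. */
    }
}

int main(void)
{
    atomic_long counter = 5;

    printf("added: %d, counter = %ld\n",
           add_unless(&counter, 1, 0), atomic_load(&counter));
    return 0;
}
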
diff --git a/include/asm-x86_64/mach_apic.h b/include/asm-x86_64/mach_apic.h
deleted file mode 100644
index 7b7115a..0000000
--- a/include/asm-x86_64/mach_apic.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch defines.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-#include <asm/genapic.h>
-
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS (genapic->target_cpus())
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define phys_pkg_id (genapic->phys_pkg_id)
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-x86_64/mc146818rtc.h b/include/asm-x86_64/mc146818rtc.h
deleted file mode 100644
index d6e3009..0000000
--- a/include/asm-x86_64/mc146818rtc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
-
-#include <asm/io.h>
-
-#ifndef RTC_PORT
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
-#endif
-
-/*
- * The machines supported so far all access the RTC index register via
- * an ISA port access, but the way to access the data register differs ...
- */
-#define CMOS_READ(addr) ({ \
-outb_p((addr),RTC_PORT(0)); \
-inb_p(RTC_PORT(1)); \
-})
-#define CMOS_WRITE(val, addr) ({ \
-outb_p((addr),RTC_PORT(0)); \
-outb_p((val),RTC_PORT(1)); \
-})
-
-#define RTC_IRQ 8
-
-#endif /* _ASM_MC146818RTC_H */
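
CMOS_READ()/CMOS_WRITE() follow the classic index/data pattern: write the
register number to port 0x70, then access its value through port 0x71. A
rough user-space equivalent, assuming root privileges and glibc's sys/io.h
port helpers (x86 only; most RTCs return BCD-encoded values):

#include <stdio.h>
#include <sys/io.h>   /* ioperm, outb_p, inb_p */

static unsigned char cmos_read(unsigned char addr)
{
    outb_p(addr, 0x70);   /* select the RTC register */
    return inb_p(0x71);   /* read its contents */
}

int main(void)
{
    if (ioperm(0x70, 2, 1)) {   /* access to ports 0x70-0x71; needs root */
        perror("ioperm");
        return 1;
    }
    /* Register 0 is the seconds counter, usually BCD-encoded. */
    printf("RTC seconds register = %#04x\n", cmos_read(0));
    return 0;
}
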
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
deleted file mode 100644
index 7bc030a..0000000
--- a/include/asm-x86_64/mce.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _ASM_MCE_H
-#define _ASM_MCE_H 1
-
-#include <asm/ioctls.h>
-#include <asm/types.h>
-
-/*
- * Machine Check support for x86
- */
-
-#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
-
-#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
-#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
-
-#define MCI_STATUS_VAL (1UL<<63) /* valid error */
-#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1UL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
-
-/* Fields are zero when not available */
-struct mce {
- __u64 status;
- __u64 misc;
- __u64 addr;
- __u64 mcgstatus;
- __u64 rip;
- __u64 tsc; /* cpu time stamp counter */
- __u64 res1; /* for future extension */
- __u64 res2; /* ditto. */
- __u8 cs; /* code segment */
- __u8 bank; /* machine check bank */
- __u8 cpu; /* cpu that raised the error */
- __u8 finished; /* entry is valid */
- __u32 pad;
-};
-
-/*
- * This structure contains all data related to the MCE log.
- * Also carries a signature to make it easier to find from external debugging tools.
- * Each entry is only valid when its finished flag is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
- char signature[12]; /* "MACHINECHECK" */
- unsigned len; /* = MCE_LOG_LEN */
- unsigned next;
- unsigned flags;
- unsigned pad0;
- struct mce entry[MCE_LOG_LEN];
-};
-
-#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE "MACHINECHECK"
-
-#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
-#define MCE_GET_LOG_LEN _IOR('M', 2, int)
-#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
-
-/* Software defined banks */
-#define MCE_EXTENDED_BANK 128
-#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-
-#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
-#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
-#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
-#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
-#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
-#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
-#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
-#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
-
-#ifdef __KERNEL__
-#include <asm/atomic.h>
-
-void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct sys_device, device_mce);
-
-#ifdef CONFIG_X86_MCE_INTEL
-void mce_intel_feature_init(struct cpuinfo_x86 *c);
-#else
-static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
-{
-}
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
-void mce_amd_feature_init(struct cpuinfo_x86 *c);
-#else
-static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
-{
-}
-#endif
-
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
-
-extern atomic_t mce_entry;
-
-extern void do_machine_check(struct pt_regs *, long);
-
-extern int mce_notify_user(void);
-
-extern void stop_mce(void);
-extern void restart_mce(void);
-
-#endif
-
-#endif
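
The MCi_STATUS_* bits above are what a decoder applies to a logged status
word. A pure-C sketch decoding a made-up status value with the same bit
definitions (illustration only, not data from real hardware):

#include <stdint.h>
#include <stdio.h>

#define MCI_STATUS_VAL   (1ULL << 63)   /* valid error */
#define MCI_STATUS_OVER  (1ULL << 62)   /* previous errors lost */
#define MCI_STATUS_UC    (1ULL << 61)   /* uncorrected error */
#define MCI_STATUS_ADDRV (1ULL << 58)   /* address register valid */

int main(void)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_ADDRV
                      | 0x0135;   /* hypothetical MCA error code */

    if (!(status & MCI_STATUS_VAL)) {
        puts("no valid error logged");
        return 0;
    }
    printf("%scorrected error, code %#06llx%s%s\n",
           (status & MCI_STATUS_UC) ? "un" : "",
           (unsigned long long)(status & 0xffff),
           (status & MCI_STATUS_ADDRV) ? ", address valid" : "",
           (status & MCI_STATUS_OVER) ? ", overflow" : "");
    return 0;
}
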
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
deleted file mode 100644
index dd5cb05..0000000
--- a/include/asm-x86_64/mman.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __X8664_MMAN_H__
-#define __X8664_MMAN_H__
-
-#include <asm-generic/mman.h>
-
-#define MAP_32BIT 0x40 /* only give out 32bit addresses */
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif
diff --git a/include/asm-x86_64/mmsegment.h b/include/asm-x86_64/mmsegment.h
deleted file mode 100644
index d3f80c9..0000000
--- a/include/asm-x86_64/mmsegment.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_MMSEGMENT_H
-#define _ASM_MMSEGMENT_H 1
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#endif
diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
deleted file mode 100644
index d2cd4a9..0000000
--- a/include/asm-x86_64/mmu.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __x86_64_MMU_H
-#define __x86_64_MMU_H
-
-#include <linux/spinlock.h>
-#include <asm/semaphore.h>
-
-/*
- * The x86_64 doesn't have a mmu context, but
- * we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
- */
-typedef struct {
- void *ldt;
- rwlock_t ldtlock;
- int size;
- struct semaphore sem;
- void *vdso;
-} mm_context_t;
-
-#endif
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
deleted file mode 100644
index 0cce83a..0000000
--- a/include/asm-x86_64/mmu_context.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __X86_64_MMU_CONTEXT_H
-#define __X86_64_MMU_CONTEXT_H
-
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-#include <asm/pda.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm-generic/mm_hooks.h>
-
-/*
- * possibly do the LDT unload here?
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
- if (read_pda(mmu_state) == TLBSTATE_OK)
- write_pda(mmu_state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void load_cr3(pgd_t *pgd)
-{
- asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- unsigned cpu = smp_processor_id();
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- write_pda(mmu_state, TLBSTATE_OK);
- write_pda(active_mm, next);
-#endif
- cpu_set(cpu, next->cpu_vm_mask);
- load_cr3(next->pgd);
-
- if (unlikely(next->context.ldt != prev->context.ldt))
- load_LDT_nolock(&next->context, cpu);
- }
-#ifdef CONFIG_SMP
- else {
- write_pda(mmu_state, TLBSTATE_OK);
- if (read_pda(active_mm) != next)
- out_of_line_bug();
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload CR3
- * to make sure to use no freed page tables.
- */
- load_cr3(next->pgd);
- load_LDT_nolock(&next->context, cpu);
- }
- }
-#endif
-}
-
-#define deactivate_mm(tsk,mm) do { \
- load_gs_index(0); \
- asm volatile("movl %0,%%fs"::"r"(0)); \
-} while(0)
-
-#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL)
-
-
-#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
deleted file mode 100644
index 19a8937..0000000
--- a/include/asm-x86_64/mmzone.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* K8 NUMA support */
-/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
-/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
-#ifndef _ASM_X86_64_MMZONE_H
-#define _ASM_X86_64_MMZONE_H 1
-
-
-#ifdef CONFIG_NUMA
-
-#define VIRTUAL_BUG_ON(x)
-
-#include <asm/smp.h>
-
-/* Simple perfect hash to map physical addresses to node numbers */
-struct memnode {
- int shift;
- unsigned int mapsize;
- u8 *map;
- u8 embedded_map[64-16];
-} ____cacheline_aligned; /* total size = 64 bytes */
-extern struct memnode memnode;
-#define memnode_shift memnode.shift
-#define memnodemap memnode.map
-#define memnodemapsize memnode.mapsize
-
-extern struct pglist_data *node_data[];
-
-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
-{
- unsigned nid;
- VIRTUAL_BUG_ON(!memnodemap);
- VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
- nid = memnodemap[addr >> memnode_shift];
- VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
- return nid;
-}
-
-#define NODE_DATA(nid) (node_data[nid])
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
- NODE_DATA(nid)->node_spanned_pages)
-
-#ifdef CONFIG_DISCONTIGMEM
-#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-
-extern int pfn_valid(unsigned long pfn);
-#endif
-
-#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE (64*1024*1024)
-#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL))
-#endif
-
-#endif
-#endif
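
phys_to_nid() above is a plain shift-and-index lookup: the physical address
is shifted right by memnode_shift and used to index a byte map of node IDs.
A tiny user-space model of that lookup (the shift and map values below are
invented for illustration):

#include <stdio.h>

/* Hypothetical: shift of 24 => one map byte per 16 MB of physical memory. */
static const int memnode_shift = 24;
static const unsigned char memnodemap[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int phys_to_nid(unsigned long addr)
{
    return memnodemap[addr >> memnode_shift];
}

int main(void)
{
    printf("addr 0x01000000 -> node %d\n", phys_to_nid(0x01000000UL));
    printf("addr 0x05000000 -> node %d\n", phys_to_nid(0x05000000UL));
    return 0;
}
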
diff --git a/include/asm-x86_64/module.h b/include/asm-x86_64/module.h
deleted file mode 100644
index 67f8f69..0000000
--- a/include/asm-x86_64/module.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X8664_MODULE_H
-#define _ASM_X8664_MODULE_H
-
-struct mod_arch_specific {};
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
deleted file mode 100644
index 017fddb..0000000
--- a/include/asm-x86_64/mpspec.h
+++ /dev/null
@@ -1,233 +0,0 @@
-#ifndef __ASM_MPSPEC_H
-#define __ASM_MPSPEC_H
-
-/*
- * Structure definitions for SMP machines following the
- * Intel Multiprocessing Specification 1.1 and 1.4.
- */
-
-/*
- * This tag identifies where the SMP configuration
- * information is.
- */
-
-#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
-
-/*
- * A maximum of 255 APICs with the current APIC ID architecture.
- */
-#define MAX_APICS 255
-
-struct intel_mp_floating
-{
- char mpf_signature[4]; /* "_MP_" */
- unsigned int mpf_physptr; /* Configuration table address */
- unsigned char mpf_length; /* Our length (paragraphs) */
- unsigned char mpf_specification;/* Specification version */
- unsigned char mpf_checksum; /* Checksum (makes sum 0) */
- unsigned char mpf_feature1; /* Standard or configuration ? */
- unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
- unsigned char mpf_feature3; /* Unused (0) */
- unsigned char mpf_feature4; /* Unused (0) */
- unsigned char mpf_feature5; /* Unused (0) */
-};
-
-struct mp_config_table
-{
- char mpc_signature[4];
-#define MPC_SIGNATURE "PCMP"
- unsigned short mpc_length; /* Size of table */
- char mpc_spec; /* 0x01 */
- char mpc_checksum;
- char mpc_oem[8];
- char mpc_productid[12];
- unsigned int mpc_oemptr; /* 0 if not present */
- unsigned short mpc_oemsize; /* 0 if not present */
- unsigned short mpc_oemcount;
- unsigned int mpc_lapic; /* APIC address */
- unsigned int reserved;
-};
-
-/* Followed by entries */
-
-#define MP_PROCESSOR 0
-#define MP_BUS 1
-#define MP_IOAPIC 2
-#define MP_INTSRC 3
-#define MP_LINTSRC 4
-
-struct mpc_config_processor
-{
- unsigned char mpc_type;
- unsigned char mpc_apicid; /* Local APIC number */
- unsigned char mpc_apicver; /* Its versions */
- unsigned char mpc_cpuflag;
-#define CPU_ENABLED 1 /* Processor is available */
-#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
- unsigned int mpc_cpufeature;
-#define CPU_STEPPING_MASK 0x0F
-#define CPU_MODEL_MASK 0xF0
-#define CPU_FAMILY_MASK 0xF00
- unsigned int mpc_featureflag; /* CPUID feature value */
- unsigned int mpc_reserved[2];
-};
-
-struct mpc_config_bus
-{
- unsigned char mpc_type;
- unsigned char mpc_busid;
- unsigned char mpc_bustype[6];
-};
-
-/* List of Bus Type string values, Intel MP Spec. */
-#define BUSTYPE_EISA "EISA"
-#define BUSTYPE_ISA "ISA"
-#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
-#define BUSTYPE_MCA "MCA"
-#define BUSTYPE_VL "VL" /* Local bus */
-#define BUSTYPE_PCI "PCI"
-#define BUSTYPE_PCMCIA "PCMCIA"
-#define BUSTYPE_CBUS "CBUS"
-#define BUSTYPE_CBUSII "CBUSII"
-#define BUSTYPE_FUTURE "FUTURE"
-#define BUSTYPE_MBI "MBI"
-#define BUSTYPE_MBII "MBII"
-#define BUSTYPE_MPI "MPI"
-#define BUSTYPE_MPSA "MPSA"
-#define BUSTYPE_NUBUS "NUBUS"
-#define BUSTYPE_TC "TC"
-#define BUSTYPE_VME "VME"
-#define BUSTYPE_XPRESS "XPRESS"
-
-struct mpc_config_ioapic
-{
- unsigned char mpc_type;
- unsigned char mpc_apicid;
- unsigned char mpc_apicver;
- unsigned char mpc_flags;
-#define MPC_APIC_USABLE 0x01
- unsigned int mpc_apicaddr;
-};
-
-struct mpc_config_intsrc
-{
- unsigned char mpc_type;
- unsigned char mpc_irqtype;
- unsigned short mpc_irqflag;
- unsigned char mpc_srcbus;
- unsigned char mpc_srcbusirq;
- unsigned char mpc_dstapic;
- unsigned char mpc_dstirq;
-};
-
-enum mp_irq_source_types {
- mp_INT = 0,
- mp_NMI = 1,
- mp_SMI = 2,
- mp_ExtINT = 3
-};
-
-#define MP_IRQDIR_DEFAULT 0
-#define MP_IRQDIR_HIGH 1
-#define MP_IRQDIR_LOW 3
-
-
-struct mpc_config_lintsrc
-{
- unsigned char mpc_type;
- unsigned char mpc_irqtype;
- unsigned short mpc_irqflag;
- unsigned char mpc_srcbusid;
- unsigned char mpc_srcbusirq;
- unsigned char mpc_destapic;
-#define MP_APIC_ALL 0xFF
- unsigned char mpc_destapiclint;
-};
-
-/*
- * Default configurations
- *
- * 1 2 CPU ISA 82489DX
- * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
- * 3 2 CPU EISA 82489DX
- * 4 2 CPU MCA 82489DX
- * 5 2 CPU ISA+PCI
- * 6 2 CPU EISA+PCI
- * 7 2 CPU MCA+PCI
- */
-
-#define MAX_MP_BUSSES 256
-/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
-extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
-extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-
-extern unsigned int boot_cpu_physical_apicid;
-extern int smp_found_config;
-extern void find_smp_config (void);
-extern void get_smp_config (void);
-extern int nr_ioapics;
-extern unsigned char apic_version [MAX_APICS];
-extern int mp_irq_entries;
-extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
-extern int mpc_default_type;
-extern unsigned long mp_lapic_addr;
-
-#ifdef CONFIG_ACPI
-extern void mp_register_lapic (u8 id, u8 enabled);
-extern void mp_register_lapic_address (u64 address);
-
-extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
-extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
-extern void mp_config_acpi_legacy_irqs (void);
-extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
-#endif
-
-extern int using_apic_timer;
-
-#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
-
-struct physid_mask
-{
- unsigned long mask[PHYSID_ARRAY_SIZE];
-};
-
-typedef struct physid_mask physid_mask_t;
-
-#define physid_set(physid, map) set_bit(physid, (map).mask)
-#define physid_clear(physid, map) clear_bit(physid, (map).mask)
-#define physid_isset(physid, map) test_bit(physid, (map).mask)
-#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
-
-#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
-#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
-#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
-#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
-#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
-#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
-#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
-#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
-#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
-#define physids_coerce(map) ((map).mask[0])
-
-#define physids_promote(physids) \
- ({ \
- physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
- __physid_mask.mask[0] = physids; \
- __physid_mask; \
- })
-
-#define physid_mask_of_physid(physid) \
- ({ \
- physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
- physid_set(physid, __physid_mask); \
- __physid_mask; \
- })
-
-#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
-#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
-
-extern physid_mask_t phys_cpu_present_map;
-
-#endif
-
diff --git a/include/asm-x86_64/msgbuf.h b/include/asm-x86_64/msgbuf.h
deleted file mode 100644
index cd6f95d..0000000
--- a/include/asm-x86_64/msgbuf.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X8664_MSGBUF_H
-#define _X8664_MSGBUF_H
-
-/*
- * The msqid64_ds structure for x86-64 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 2 miscellaneous 64-bit values
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif
diff --git a/include/asm-x86_64/msidef.h b/include/asm-x86_64/msidef.h
deleted file mode 100644
index 083ad58..0000000
--- a/include/asm-x86_64/msidef.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/msidef.h>
diff --git a/include/asm-x86_64/msr-index.h b/include/asm-x86_64/msr-index.h
deleted file mode 100644
index d77a63f..0000000
--- a/include/asm-x86_64/msr-index.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/msr-index.h>
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
deleted file mode 100644
index d5c55b8..0000000
--- a/include/asm-x86_64/msr.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef X86_64_MSR_H
-#define X86_64_MSR_H 1
-
-#include <asm/msr-index.h>
-
-#ifndef __ASSEMBLY__
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection); this allows gcc to optimize better.
- */
-
-#define rdmsr(msr,val1,val2) \
- __asm__ __volatile__("rdmsr" \
- : "=a" (val1), "=d" (val2) \
- : "c" (msr))
-
-
-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
- __asm__ __volatile__("rdmsr" \
- : "=a" (a__), "=d" (b__) \
- : "c" (msr)); \
- val = a__ | (b__<<32); \
-} while(0)
-
-#define wrmsr(msr,val1,val2) \
- __asm__ __volatile__("wrmsr" \
- : /* no outputs */ \
- : "c" (msr), "a" (val1), "d" (val2))
-
-#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
- asm volatile("2: wrmsr ; xorl %0,%0\n" \
- "1:\n\t" \
- ".section .fixup,\"ax\"\n\t" \
- "3: movl %4,%0 ; jmp 1b\n\t" \
- ".previous\n\t" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n\t" \
- " .quad 2b,3b\n\t" \
- ".previous" \
- : "=a" (ret__) \
- : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
- ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
- ({ int ret__; \
- asm volatile ("1: rdmsr\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %4,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
- :"c"(msr), "i"(-EIO), "0"(0)); \
- ret__; })
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscp(low,high,aux) \
- asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
-
-#define rdtscll(val) do { \
- unsigned int __a,__d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
- (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
-
-#define rdtscpll(val, aux) do { \
- unsigned long __a, __d; \
- asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
- (val) = (__d << 32) | __a; \
-} while (0)
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op));
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
- int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op), "c" (count));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax;
-
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx;
-
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ecx;
-
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx;
-
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
- return edx;
-}
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
-#endif /* X86_64_MSR_H */
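
rdmsrl() above stitches the EDX:EAX halves returned by rdmsr into a single
64-bit value. From user space the same registers can be read through the
/dev/cpu/N/msr interface exported by the msr module, where the file offset
selects the MSR number. The sketch below assumes root and a loaded msr
module; MSR 0x10 is the time-stamp counter:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uint64_t val;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);   /* needs root + msr module */

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    /* The read offset is the MSR number; 8 bytes come back as EDX:EAX. */
    if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
        perror("pread");
        close(fd);
        return 1;
    }
    printf("MSR 0x10 (TSC) = %#llx\n", (unsigned long long)val);
    close(fd);
    return 0;
}
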
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
deleted file mode 100644
index b557c48..0000000
--- a/include/asm-x86_64/mtrr.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* Generic MTRR (Memory Type Range Register) ioctls.
-
- Copyright (C) 1997-1999 Richard Gooch
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Richard Gooch may be reached by email at rgooch@atnf.csiro.au
- The postal address is:
- Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
-*/
-#ifndef _LINUX_MTRR_H
-#define _LINUX_MTRR_H
-
-#include <linux/ioctl.h>
-
-#define MTRR_IOCTL_BASE 'M'
-
-struct mtrr_sentry
-{
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
-/* Warning: this structure has a different order from i386
- on x86-64. The 32bit emulation code takes care of that.
- But you need to use this for 64bit, otherwise your X server
- will break. */
-struct mtrr_gentry
-{
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int regnum; /* Register number */
- unsigned int type; /* Type of region */
-};
-
-/* These are the various ioctls */
-#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
-#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
-#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
-#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
-#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
-#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
-#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
-#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
-#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
-#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
-
-/* These are the region types */
-#define MTRR_TYPE_UNCACHABLE 0
-#define MTRR_TYPE_WRCOMB 1
-/*#define MTRR_TYPE_ 2*/
-/*#define MTRR_TYPE_ 3*/
-#define MTRR_TYPE_WRTHROUGH 4
-#define MTRR_TYPE_WRPROT 5
-#define MTRR_TYPE_WRBACK 6
-#define MTRR_NUM_TYPES 7
-
-#ifdef __KERNEL__
-
-/* The following functions are for use by other drivers */
-# ifdef CONFIG_MTRR
-extern int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, char increment);
-extern int mtrr_add_page (unsigned long base, unsigned long size,
- unsigned int type, char increment);
-extern int mtrr_del (int reg, unsigned long base, unsigned long size);
-extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
-# else
-static __inline__ int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, char increment)
-{
- return -ENODEV;
-}
-static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
- unsigned int type, char increment)
-{
- return -ENODEV;
-}
-static __inline__ int mtrr_del (int reg, unsigned long base,
- unsigned long size)
-{
- return -ENODEV;
-}
-static __inline__ int mtrr_del_page (int reg, unsigned long base,
- unsigned long size)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_MTRR */
-
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
-struct mtrr_sentry32
-{
- compat_ulong_t base; /* Base address */
- compat_uint_t size; /* Size of region */
- compat_uint_t type; /* Type of region */
-};
-
-struct mtrr_gentry32
-{
- compat_ulong_t regnum; /* Register number */
- compat_uint_t base; /* Base address */
- compat_uint_t size; /* Size of region */
- compat_uint_t type; /* Type of region */
-};
-
-#define MTRR_IOCTL_BASE 'M'
-
-#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
-#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
-#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
-#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
-#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
-#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
-
-#endif /* CONFIG_COMPAT */
-
-#ifdef CONFIG_MTRR
-extern void mtrr_ap_init(void);
-extern void mtrr_bp_init(void);
-extern void mtrr_save_fixed_ranges(void *);
-extern void mtrr_save_state(void);
-#else
-#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
-#define mtrr_save_fixed_ranges(arg) do {} while (0)
-#define mtrr_save_state() do {} while (0)
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_MTRR_H */
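For context, the ioctls declared above are issued from user space against the MTRR control file (typically /proc/mtrr). A minimal sketch, assuming that path, an installed <asm/mtrr.h>, and an illustrative framebuffer base/size, of adding a write-combining region:

	/* Hypothetical user-space example; base and size are placeholders. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/mtrr.h>

	int main(void)
	{
		struct mtrr_sentry sentry = {
			.base = 0xf8000000UL,	/* illustrative framebuffer base */
			.size = 0x400000,	/* 4 MB region */
			.type = MTRR_TYPE_WRCOMB,
		};
		int fd = open("/proc/mtrr", O_WRONLY);

		if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &sentry) < 0) {
			perror("MTRRIOC_ADD_ENTRY");
			return 1;
		}
		close(fd);
		return 0;
	}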
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
deleted file mode 100644
index 6c2949a..0000000
--- a/include/asm-x86_64/mutex.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Assembly implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-
-/**
- * __mutex_fastpath_lock - decrement and call function if negative
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is negative
- *
- * Atomically decrements @v and calls <fail_fn> if the result is negative.
- */
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%rdi) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1:" \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
- else
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - increment and call function if nonpositive
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is nonpositive
- *
- * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
- */
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%rdi) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: " \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
- * if it wasn't 1 originally. [the fallback function is never used on
- * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_cmpxchg(count, 1, 0) == 1))
- return 1;
- else
- return 0;
-}
-
-#endif
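The fastpath macros above are meant to be paired by the generic mutex code with out-of-line slowpath functions. Roughly, a caller looks like the following sketch (illustrative only; my_lock_slowpath and my_mutex_lock are made-up names, not the kernel's actual generic code):

	static void my_lock_slowpath(atomic_t *count);	/* hypothetical slowpath */

	static inline void my_mutex_lock(atomic_t *count)
	{
		/* Atomically decrement; if the count goes negative the lock was
		 * contended and the slowpath is called with the counter in %rdi. */
		__mutex_fastpath_lock(count, my_lock_slowpath);
	}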
diff --git a/include/asm-x86_64/namei.h b/include/asm-x86_64/namei.h
deleted file mode 100644
index bef239f..0000000
--- a/include/asm-x86_64/namei.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __X8664_NAMEI_H
-#define __X8664_NAMEI_H
-
-/* This dummy routine may be changed to something useful
- * for /usr/gnemul/ emulation stuff.
- * Look at asm-sparc/namei.h for details.
- */
-
-#define __emul_prefix() NULL
-
-#endif
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
deleted file mode 100644
index 65b6acf..0000000
--- a/include/asm-x86_64/nmi.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * linux/include/asm-i386/nmi.h
- */
-#ifndef ASM_NMI_H
-#define ASM_NMI_H
-
-#include <linux/pm.h>
-#include <asm/io.h>
-
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it. Return 1
- * if a handler exists and handled the NMI successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-#ifdef CONFIG_PM
-
-/** Replace the PM callback routine for NMI. */
-struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev * dev);
-
-#else
-
-static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-{
- return 0;
-}
-
-static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-{
-}
-
-#endif /* CONFIG_PM */
-
-extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-
-#define get_nmi_reason() inb(0x61)
-
-extern int panic_on_timeout;
-extern int unknown_nmi_panic;
-extern int nmi_watchdog_enabled;
-
-extern int check_nmi_watchdog(void);
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
-extern int reserve_perfctr_nmi(unsigned int);
-extern void release_perfctr_nmi(unsigned int);
-extern int reserve_evntsel_nmi(unsigned int);
-extern void release_evntsel_nmi(unsigned int);
-
-extern void setup_apic_nmi_watchdog (void *);
-extern void stop_apic_nmi_watchdog (void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-
-extern void nmi_watchdog_default(void);
-extern int setup_nmi_watchdog(char *);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_DISABLED -1
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
-#define NMI_DEFAULT NMI_DISABLED
-
-struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
- void __user *, size_t *, loff_t *);
-
-extern int unknown_nmi_panic;
-
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
-
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
-void stop_nmi(void);
-void restart_nmi(void);
-
-#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
deleted file mode 100644
index 933ff11..0000000
--- a/include/asm-x86_64/numa.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _ASM_X8664_NUMA_H
-#define _ASM_X8664_NUMA_H 1
-
-#include <linux/nodemask.h>
-
-struct bootnode {
- u64 start,end;
-};
-
-extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
-
-#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
-
-extern void numa_add_cpu(int cpu);
-extern void numa_init_array(void);
-extern int numa_off;
-
-extern void numa_set_node(int cpu, int node);
-extern void srat_reserve_add_area(int nodeid);
-extern int hotadd_percent;
-
-extern unsigned char apicid_to_node[256];
-#ifdef CONFIG_NUMA
-extern void __init init_cpu_to_node(void);
-
-static inline void clear_node_cpumask(int cpu)
-{
- clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
-}
-
-#else
-#define init_cpu_to_node() do {} while (0)
-#define clear_node_cpumask(cpu) do {} while (0)
-#endif
-
-#define NUMA_NO_NODE 0xff
-
-#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
deleted file mode 100644
index 88adf1a..0000000
--- a/include/asm-x86_64/page.h
+++ /dev/null
@@ -1,143 +0,0 @@
-#ifndef _X86_64_PAGE_H
-#define _X86_64_PAGE_H
-
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
-
-#define THREAD_ORDER 1
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE-1))
-
-#define EXCEPTION_STACK_ORDER 0
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
-
-#define HPAGE_SHIFT PMD_SHIFT
-#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-extern unsigned long end_pfn;
-
-void clear_page(void *);
-void copy_page(void *, void *);
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pud; } pud_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define PTE_MASK PHYSICAL_PAGE_MASK
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-extern unsigned long phys_base;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
-#define pud_val(x) ((x).pud)
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pud(x) ((pud_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
-#define __KERNEL_ALIGN 0x200000
-
-/*
- * Make sure the kernel is aligned to a 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB-aligned address (CONFIG_PHYSICAL_START).
- */
-#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
-#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
-#endif
-
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-#define __PHYSICAL_MASK_SHIFT 46
-#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK_SHIFT 48
-#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
-
-#define KERNEL_TEXT_SIZE (40*1024*1024)
-#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
-#define PAGE_OFFSET __PAGE_OFFSET
-
-#ifndef __ASSEMBLY__
-
-#include <asm/bug.h>
-
-extern unsigned long __phys_addr(unsigned long);
-
-#endif /* __ASSEMBLY__ */
-
-#define __pa(x) __phys_addr((unsigned long)(x))
-#define __pa_symbol(x) __phys_addr((unsigned long)(x))
-
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define __boot_va(x) __va(x)
-#define __boot_pa(x) __pa(x)
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) ((pfn) < end_pfn)
-#endif
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define __HAVE_ARCH_GATE_AREA 1
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/page.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_PAGE_H */
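PAGE_ALIGN above is the usual power-of-two round-up. A quick stand-alone sanity sketch with the 4 KB constants spelled out (plain user-space C; the MY_ prefix marks local copies, not kernel symbols):

	#include <assert.h>

	#define MY_PAGE_SIZE	4096UL
	#define MY_PAGE_MASK	(~(MY_PAGE_SIZE - 1))
	#define MY_PAGE_ALIGN(addr) (((addr) + MY_PAGE_SIZE - 1) & MY_PAGE_MASK)

	int main(void)
	{
		assert(MY_PAGE_ALIGN(0x1001UL) == 0x2000UL);	/* rounds up */
		assert(MY_PAGE_ALIGN(0x2000UL) == 0x2000UL);	/* already aligned */
		return 0;
	}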
diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h
deleted file mode 100644
index a728786..0000000
--- a/include/asm-x86_64/param.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASMx86_64_PARAM_H
-#define _ASMx86_64_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif
diff --git a/include/asm-x86_64/parport.h b/include/asm-x86_64/parport.h
deleted file mode 100644
index 7135ef9..0000000
--- a/include/asm-x86_64/parport.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * parport.h: ia32-specific parport initialisation
- *
- * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
- *
- * This file should only be included by drivers/parport/parport_pc.c.
- */
-
-#ifndef _ASM_X8664_PARPORT_H
-#define _ASM_X8664_PARPORT_H 1
-
-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
-{
- return parport_pc_find_isa_ports (autoirq, autodma);
-}
-
-#endif
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
deleted file mode 100644
index 6823fa4..0000000
--- a/include/asm-x86_64/pci-direct.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef ASM_PCI_DIRECT_H
-#define ASM_PCI_DIRECT_H 1
-
-#include <linux/types.h>
-
-/* Direct PCI access. This is used for PCI accesses in early boot before
- the PCI subsystem works. */
-
-extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
-extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
-extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
-extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
-extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
-
-extern int early_pci_allowed(void);
-
-#endif
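A sketch of how these early-boot accessors are typically used: probe function 0 of every slot on bus 0 and read the 32-bit vendor/device ID word at config offset 0 (illustrative only; my_early_pci_scan is a made-up name):

	static void my_early_pci_scan(void)
	{
		u8 slot;

		if (!early_pci_allowed())
			return;
		for (slot = 0; slot < 32; slot++) {
			u32 id = read_pci_config(0, slot, 0, 0);

			if (id != 0xffffffff)	/* all-ones => no device */
				printk("pci 0000:00:%02x.0 id %08x\n", slot, id);
		}
	}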
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
deleted file mode 100644
index 5da8cb0..0000000
--- a/include/asm-x86_64/pci.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef __x8664_PCI_H
-#define __x8664_PCI_H
-
-#include <asm/io.h>
-
-#ifdef __KERNEL__
-
-struct pci_sysdata {
- int node; /* NUMA node */
- void* iommu; /* IOMMU private data */
-};
-
-extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
-
-#ifdef CONFIG_CALGARY_IOMMU
-static inline void* pci_iommu(struct pci_bus *bus)
-{
- struct pci_sysdata *sd = bus->sysdata;
- return sd->iommu;
-}
-
-static inline void set_pci_iommu(struct pci_bus *bus, void *val)
-{
- struct pci_sysdata *sd = bus->sysdata;
- sd->iommu = val;
-}
-#endif /* CONFIG_CALGARY_IOMMU */
-
-#include <linux/mm.h> /* for struct page */
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-#define pcibios_scan_all_fns(a, b) 0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq, int active);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/page.h>
-
-extern void pci_iommu_alloc(void);
-extern int iommu_setup(char *opt);
-
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions
- *
- * On AMD64 this mostly holds, but we set it to zero if a hardware
- * IOMMU (gart) or software IOMMU (swiotlb) is available.
- */
-#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-
-#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-
-#else
-/* No IOMMU */
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-#endif
-
-#include <asm-generic/pci-dma-compat.h>
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- *strat = PCI_DMA_BURST_INFINITY;
- *strategy_parameter = ~0UL;
-}
-#endif
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
-#endif /* __KERNEL__ */
-
-/* generic pci stuff */
-#ifdef CONFIG_PCI
-#include <asm-generic/pci.h>
-#endif
-
-#endif /* __x8664_PCI_H */
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
deleted file mode 100644
index 5642634..0000000
--- a/include/asm-x86_64/pda.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef X86_64_PDA_H
-#define X86_64_PDA_H
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <asm/page.h>
-
-/* Per-processor data structure. %gs points to it while the kernel runs. */
-struct x8664_pda {
- struct task_struct *pcurrent; /* 0 Current process */
- unsigned long data_offset; /* 8 Per cpu data offset from linker
- address */
- unsigned long kernelstack; /* 16 top of kernel stack for current */
- unsigned long oldrsp; /* 24 user rsp for system call */
- int irqcount; /* 32 Irq nesting counter. Starts with -1 */
- int cpunumber; /* 36 Logical CPU number */
-#ifdef CONFIG_CC_STACKPROTECTOR
- unsigned long stack_canary; /* 40 stack canary value */
- /* gcc-ABI: this canary MUST be at
- offset 40!!! */
-#endif
- char *irqstackptr;
- int nodenumber; /* number of current node */
- unsigned int __softirq_pending;
- unsigned int __nmi_count; /* number of NMIs on this CPU */
- short mmu_state;
- short isidle;
- struct mm_struct *active_mm;
- unsigned apic_timer_irqs;
-} ____cacheline_aligned_in_smp;
-
-extern struct x8664_pda *_cpu_pda[];
-extern struct x8664_pda boot_cpu_pda[];
-
-#define cpu_pda(i) (_cpu_pda[i])
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op,field,val) do { \
- typedef typeof(_proxy_pda.field) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- case 4: \
- asm(op "l %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i" (pda_offset(field))); \
- break; \
- case 8: \
- asm(op "q %1,%%gs:%c2": \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- } while (0)
-
-#define pda_from_op(op,field) ({ \
- typeof(_proxy_pda.field) ret__; \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %%gs:%c1,%0" : \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 4: \
- asm(op "l %%gs:%c1,%0": \
- "=r" (ret__): \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 8: \
- asm(op "q %%gs:%c1,%0": \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- ret__; })
-
-#define read_pda(field) pda_from_op("mov",field)
-#define write_pda(field,val) pda_to_op("mov",field,val)
-#define add_pda(field,val) pda_to_op("add",field,val)
-#define sub_pda(field,val) pda_to_op("sub",field,val)
-#define or_pda(field,val) pda_to_op("or",field,val)
-
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit,field) ({ \
- int old__; \
- asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (_proxy_pda.field) \
- : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
- old__; \
-})
-
-#endif
-
-#define PDA_STACKOFFSET (5*8)
-
-#endif
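Usage of the accessors above reduces to a single %gs-relative instruction per access; for example, a sketch of the common call patterns (my_ names are illustrative wrappers, not new API):

	/* Logical CPU number of the currently running CPU. */
	static inline int my_cpu_number(void)
	{
		return read_pda(cpunumber);	/* one mov from %gs:offset */
	}

	/* Remember the current task, as done on context switch. */
	static inline void my_set_current(struct task_struct *next)
	{
		write_pda(pcurrent, next);
	}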
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
deleted file mode 100644
index 5abd482..0000000
--- a/include/asm-x86_64/percpu.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef _ASM_X8664_PERCPU_H_
-#define _ASM_X8664_PERCPU_H_
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
- in the PDA. Longer term, the PDA and every per-cpu variable
- should just be put into a single section and referenced directly
- from %gs. */
-
-#ifdef CONFIG_SMP
-
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset() read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.shared_aligned"))) \
- __typeof__(type) per_cpu__##name \
- ____cacheline_internodealigned_in_smp
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({ \
- extern int simple_identifier_##var(void); \
- RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); }))
-#define __get_cpu_var(var) (*({ \
- extern int simple_identifier_##var(void); \
- RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
-#define __raw_get_cpu_var(var) (*({ \
- extern int simple_identifier_##var(void); \
- RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size) \
-do { \
- unsigned int __i; \
- for_each_possible_cpu(__i) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
-} while (0)
-
-extern void setup_per_cpu_areas(void);
-
-#else /* ! SMP */
-
-#define DEFINE_PER_CPU(type, name) \
- __typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var) per_cpu__##var
-#define __raw_get_cpu_var(var) per_cpu__##var
-
-#endif /* SMP */
-
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
-#endif /* _ASM_X8664_PERCPU_H_ */
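For reference, the definition/access pattern these macros support looks like the following sketch (my_counter is a hypothetical variable):

	DEFINE_PER_CPU(unsigned long, my_counter);

	static void bump_local_counter(void)
	{
		__get_cpu_var(my_counter)++;		/* this CPU's copy */
	}

	static unsigned long read_counter_of(int cpu)
	{
		return per_cpu(my_counter, cpu);	/* some other CPU's copy */
	}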
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
deleted file mode 100644
index 8bb5646..0000000
--- a/include/asm-x86_64/pgalloc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef _X86_64_PGALLOC_H
-#define _X86_64_PGALLOC_H
-
-#include <asm/pda.h>
-#include <linux/threads.h>
-#include <linux/mm.h>
-
-#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
-#define pud_populate(mm, pud, pmd) \
- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
-#define pgd_populate(mm, pgd, pud) \
- set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
- set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-}
-
-static inline void pmd_free(pmd_t *pmd)
-{
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- free_page((unsigned long)pmd);
-}
-
-static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
-{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free (pud_t *pud)
-{
- BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
-}
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
- struct page *page = virt_to_page(pgd);
-
- spin_lock(&pgd_lock);
- list_add(&page->lru, &pgd_list);
- spin_unlock(&pgd_lock);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
- struct page *page = virt_to_page(pgd);
-
- spin_lock(&pgd_lock);
- list_del(&page->lru);
- spin_unlock(&pgd_lock);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- unsigned boundary;
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
- if (!pgd)
- return NULL;
- pgd_list_add(pgd);
- /*
- * Copy kernel pointers in from init.
- * Could keep a freelist or slab cache of those because the kernel
- * part never changes.
- */
- boundary = pgd_index(__PAGE_OFFSET);
- memset(pgd, 0, boundary * sizeof(pgd_t));
- memcpy(pgd + boundary,
- init_level4_pgt + boundary,
- (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
- return pgd;
-}
-
-static inline void pgd_free(pgd_t *pgd)
-{
- BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
- pgd_list_del(pgd);
- free_page((unsigned long)pgd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- if (!p)
- return NULL;
- return virt_to_page(p);
-}
-
-/* Should really implement gc for free page table pages. This could be
- done with a reference count in struct page. */
-
-static inline void pte_free_kernel(pte_t *pte)
-{
- BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
- __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-
-#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-
-#endif /* _X86_64_PGALLOC_H */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
deleted file mode 100644
index 57dd6b3..0000000
--- a/include/asm-x86_64/pgtable.h
+++ /dev/null
@@ -1,432 +0,0 @@
-#ifndef _X86_64_PGTABLE_H
-#define _X86_64_PGTABLE_H
-
-#include <linux/const.h>
-#ifndef __ASSEMBLY__
-
-/*
- * This file contains the functions and defines necessary to modify and use
- * the x86-64 page table tree.
- */
-#include <asm/processor.h>
-#include <asm/bitops.h>
-#include <linux/threads.h>
-#include <asm/pda.h>
-
-extern pud_t level3_kernel_pgt[512];
-extern pud_t level3_ident_pgt[512];
-extern pmd_t level2_kernel_pgt[512];
-extern pgd_t init_level4_pgt[];
-extern unsigned long __supported_pte_mask;
-
-#define swapper_pg_dir init_level4_pgt
-
-extern void paging_init(void);
-extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT 39
-#define PTRS_PER_PGD 512
-
-/*
- * 3rd level page
- */
-#define PUD_SHIFT 30
-#define PTRS_PER_PUD 512
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT 21
-#define PTRS_PER_PMD 512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE 512
-
-#ifndef __ASSEMBLY__
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
- printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
-#define pgd_none(x) (!pgd_val(x))
-#define pud_none(x) (!pud_val(x))
-
-static inline void set_pte(pte_t *dst, pte_t val)
-{
- pte_val(*dst) = pte_val(val);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-static inline void set_pmd(pmd_t *dst, pmd_t val)
-{
- pmd_val(*dst) = pmd_val(val);
-}
-
-static inline void set_pud(pud_t *dst, pud_t val)
-{
- pud_val(*dst) = pud_val(val);
-}
-
-static inline void pud_clear (pud_t *pud)
-{
- set_pud(pud, __pud(0));
-}
-
-static inline void set_pgd(pgd_t *dst, pgd_t val)
-{
- pgd_val(*dst) = pgd_val(val);
-}
-
-static inline void pgd_clear (pgd_t * pgd)
-{
- set_pgd(pgd, __pgd(0));
-}
-
-#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
-
-struct mm_struct;
-
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-{
- pte_t pte;
- if (full) {
- pte = *ptep;
- *ptep = __pte(0);
- } else {
- pte = ptep_get_and_clear(mm, addr, ptep);
- }
- return pte;
-}
-
-#define pte_same(a, b) ((a).pte == (b).pte)
-
-#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
-
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
-#define FIRST_USER_ADDRESS 0
-
-#define MAXMEM _AC(0x3fffffffffff, UL)
-#define VMALLOC_START _AC(0xffffc20000000000, UL)
-#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
-#define MODULES_VADDR _AC(0xffffffff88000000, UL)
-#define MODULES_END _AC(0xfffffffffff00000, UL)
-#define MODULES_LEN (MODULES_END - MODULES_VADDR)
-
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 2MB page */
-#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
-
-#define _PAGE_PROTNONE 0x080 /* If not present */
-#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_VSYSCALL \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE \
- (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC \
- (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-#ifndef __ASSEMBLY__
-
-static inline unsigned long pgd_bad(pgd_t pgd)
-{
- return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-static inline unsigned long pud_bad(pud_t pud)
-{
- return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-static inline unsigned long pmd_bad(pmd_t pmd)
-{
- return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
- right? */
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
- pte_t pte;
- pte_val(pte) = (page_nr << PAGE_SHIFT);
- pte_val(pte) |= pgprot_val(pgprot);
- pte_val(pte) &= __supported_pte_mask;
- return pte;
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
-
-static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
-static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
-
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
-static inline int pmd_large(pmd_t pte) {
- return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-}
-
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-/*
- * Level 4 access.
- */
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
-#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
-#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
-
-/* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
-
-/* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
- pmd_index(address))
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
-#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
-
-/* PTE - Level 1 access. */
-
-/* page, protection -> pte */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-
-/* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte_val(pte) &= _PAGE_CHG_MASK;
- pte_val(pte) |= pgprot_val(newprot);
- pte_val(pte) &= __supported_pte_mask;
- return pte;
-}
-
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
- pte_index(address))
-
-/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
-#define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
-
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-
-/* We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPUs that might be updating the dirty
- * bit at the same time. */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed && __dirty) { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
- } \
- __changed; \
-})
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x) (((x).val >> 1) & 0x3f)
-#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-extern spinlock_t pgd_lock;
-extern struct list_head pgd_list;
-
-extern int kern_addr_valid(unsigned long addr);
-
-pte_t *lookup_address(unsigned long addr);
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
-#define HAVE_ARCH_UNMAPPED_AREA
-
-#define pgtable_cache_init() do { } while (0)
-#define check_pgt_cache() do { } while (0)
-
-#define PAGE_AGP PAGE_KERNEL_NOCACHE
-#define HAVE_PAGE_AGP 1
-
-/* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define kc_offset_to_vaddr(o) \
- (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _X86_64_PGTABLE_H */
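Taken together, the level accessors above support the usual software walk of the four-level tree. A sketch (walk_to_pte is an illustrative helper; locking and huge-page handling are trimmed):

	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_large(*pmd))	/* stop at 2MB mappings */
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}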
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
deleted file mode 100644
index c98509d..0000000
--- a/include/asm-x86_64/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/include/asm-x86_64/posix_types.h b/include/asm-x86_64/posix_types.h
deleted file mode 100644
index 9926aa4..0000000
--- a/include/asm-x86_64/posix_types.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef _ASM_X86_64_POSIX_TYPES_H
-#define _ASM_X86_64_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned long __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-
-typedef unsigned long __kernel_old_dev_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
-
-#endif
diff --git a/include/asm-x86_64/prctl.h b/include/asm-x86_64/prctl.h
deleted file mode 100644
index 52952ad..0000000
--- a/include/asm-x86_64/prctl.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef X86_64_PRCTL_H
-#define X86_64_PRCTL_H 1
-
-#define ARCH_SET_GS 0x1001
-#define ARCH_SET_FS 0x1002
-#define ARCH_GET_FS 0x1003
-#define ARCH_GET_GS 0x1004
-
-
-#endif
diff --git a/include/asm-x86_64/processor-flags.h b/include/asm-x86_64/processor-flags.h
deleted file mode 100644
index ec99a57..0000000
--- a/include/asm-x86_64/processor-flags.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/processor-flags.h>
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
deleted file mode 100644
index 31f579b..0000000
--- a/include/asm-x86_64/processor.h
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * include/asm-x86_64/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_X86_64_PROCESSOR_H
-#define __ASM_X86_64_PROCESSOR_H
-
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <linux/threads.h>
-#include <asm/msr.h>
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/mmsegment.h>
-#include <asm/percpu.h>
-#include <linux/personality.h>
-#include <linux/cpumask.h>
-#include <asm/processor-flags.h>
-
-#define TF_MASK 0x00000100
-#define IF_MASK 0x00000200
-#define IOPL_MASK 0x00003000
-#define NT_MASK 0x00004000
-#define VM_MASK 0x00020000
-#define AC_MASK 0x00040000
-#define VIF_MASK 0x00080000 /* virtual interrupt flag */
-#define VIP_MASK 0x00100000 /* virtual interrupt pending */
-#define ID_MASK 0x00200000
-
-#define desc_empty(desc) \
- (!((desc)->a | (desc)->b))
-
-#define desc_equal(desc1, desc2) \
- (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- */
-
-struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
- int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- __u32 x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB */
- int x86_clflush_size;
- int x86_cache_alignment;
- int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */
- __u8 x86_virt_bits, x86_phys_bits;
- __u8 x86_max_cores; /* cpuid returned max cores value */
- __u32 x86_power;
- __u32 extended_cpuid_level; /* Max extended CPUID function supported */
- unsigned long loops_per_jiffy;
-#ifdef CONFIG_SMP
- cpumask_t llc_shared_map; /* cpus sharing the last level cache */
-#endif
- __u8 apicid;
-#ifdef CONFIG_SMP
- __u8 booted_cores; /* number of cores as seen by OS */
- __u8 phys_proc_id; /* Physical Processor id. */
- __u8 cpu_core_id; /* Core id. */
-#endif
-} ____cacheline_aligned;
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NUM 8
-#define X86_VENDOR_UNKNOWN 0xff
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-extern char ignore_irq13;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern unsigned short num_cache_leaves;
-
-/*
- * Save the cr4 feature set we're using (i.e.
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPUs that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features |= mask;
- __asm__("movq %%cr4,%%rax\n\t"
- "orq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (mask)
- :"ax");
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features &= ~mask;
- __asm__("movq %%cr4,%%rax\n\t"
- "andq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
-}
-
-
-/*
- * User space process size. 47bits minus one guard page.
- */
-#define TASK_SIZE64 (0x800000000000UL - 4096)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
-
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-
-struct i387_fxsave_struct {
- u16 cwd;
- u16 swd;
- u16 twd;
- u16 fop;
- u64 rip;
- u64 rdp;
- u32 mxcsr;
- u32 mxcsr_mask;
- u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
- u32 padding[24];
-} __attribute__ ((aligned (16)));
-
-union i387_union {
- struct i387_fxsave_struct fxsave;
-};
-
-struct tss_struct {
- u32 reserved1;
- u64 rsp0;
- u64 rsp1;
- u64 rsp2;
- u64 reserved2;
- u64 ist[7];
- u32 reserved3;
- u32 reserved4;
- u16 reserved5;
- u16 io_bitmap_base;
- /*
- * The extra 1 is there because the CPU will access an
- * additional byte beyond the end of the IO permission
- * bitmap. The extra byte must be all 1 bits, and must
- * be within the limit. Thus we have:
- *
- * 128 bytes, the bitmap itself, for ports 0..0x3ff
- * 8 bytes, for an extra "long" of ~0UL
- */
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-} __attribute__((packed)) ____cacheline_aligned;
-
-
-extern struct cpuinfo_x86 boot_cpu_data;
-DECLARE_PER_CPU(struct tss_struct,init_tss);
-/* Save the original ist values for checking stack pointers during debugging */
-struct orig_ist {
- unsigned long ist[7];
-};
-DECLARE_PER_CPU(struct orig_ist, orig_ist);
-
-#ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
-#else
-#define ARCH_MIN_TASKALIGN 16
-#define ARCH_MIN_MMSTRUCT_ALIGN 0
-#endif
-
-struct thread_struct {
- unsigned long rsp0;
- unsigned long rsp;
- unsigned long userrsp; /* Copy from PDA */
- unsigned long fs;
- unsigned long gs;
- unsigned short es, ds, fsindex, gsindex;
-/* Hardware debugging registers */
- unsigned long debugreg0;
- unsigned long debugreg1;
- unsigned long debugreg2;
- unsigned long debugreg3;
- unsigned long debugreg6;
- unsigned long debugreg7;
-/* fault info */
- unsigned long cr2, trap_no, error_code;
-/* floating point info */
- union i387_union i387 __attribute__((aligned(16)));
-/* IO permissions. The bitmap could be moved into the GDT; that would make
- the switch faster for a limited number of ioperm-using tasks. -AK */
- int ioperm;
- unsigned long *io_bitmap_ptr;
- unsigned io_bitmap_max;
-/* cached TLS descriptors. */
- u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
-} __attribute__((aligned(16)));
-
-#define INIT_THREAD { \
- .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define INIT_TSS { \
- .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define INIT_MMAP \
-{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-
-#define start_thread(regs,new_rip,new_rsp) do { \
- asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
- load_gs_index(0); \
- (regs)->rip = (new_rip); \
- (regs)->rsp = (new_rsp); \
- write_pda(oldrsp, (new_rsp)); \
- (regs)->cs = __USER_CS; \
- (regs)->ss = __USER_DS; \
- (regs)->eflags = 0x200; \
- set_fs(USER_DS); \
-} while(0)
-
-#define get_debugreg(var, register) \
- __asm__("movq %%db" #register ", %0" \
- :"=r" (var))
-#define set_debugreg(value, register) \
- __asm__("movq %0,%%db" #register \
- : /* no output */ \
- :"r" (value))
-
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/*
- * Return saved PC of a blocked thread.
- * What is this good for? It will always be the scheduler or ret_from_fork.
- */
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
-
-extern unsigned long get_wchan(struct task_struct *p);
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
-#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
-#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
-
-
-struct microcode_header {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int datasize;
- unsigned int totalsize;
- unsigned int reserved[3];
-};
-
-struct microcode {
- struct microcode_header hdr;
- unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
- unsigned int sig;
- unsigned int pf;
- unsigned int cksum;
-};
-
-struct extended_sigtable {
- unsigned int count;
- unsigned int cksum;
- unsigned int reserved[3];
- struct extended_signature sigs[0];
-};
-
-
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-
-/* Opteron nops */
-#define K8_NOP1 ".byte 0x90\n"
-#define K8_NOP2 ".byte 0x66,0x90\n"
-#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
-#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
-#define K8_NOP5 K8_NOP3 K8_NOP2
-#define K8_NOP6 K8_NOP3 K8_NOP3
-#define K8_NOP7 K8_NOP4 K8_NOP3
-#define K8_NOP8 K8_NOP4 K8_NOP4
-
-#define ASM_NOP_MAX 8
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__("rep;nop": : :"memory");
-}
-
-/* Stop speculative execution */
-static inline void sync_core(void)
-{
- int tmp;
- asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
-
-#define ARCH_HAS_PREFETCH
-static inline void prefetch(void *x)
-{
- asm volatile("prefetcht0 (%0)" :: "r" (x));
-}
-
-#define ARCH_HAS_PREFETCHW 1
-static inline void prefetchw(void *x)
-{
- alternative_input("prefetcht0 (%1)",
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-
-#define ARCH_HAS_SPINLOCK_PREFETCH 1
-
-#define spin_lock_prefetch(x) prefetchw(x)
-
-#define cpu_relax() rep_nop()
-
-static inline void serialize_cpu(void)
-{
- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
-}
-
-static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
-{
- /* "monitor %eax,%ecx,%edx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc8;"
- : :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- "sti; .byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
-}
-
-extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-
-#define stack_current() \
-({ \
- struct thread_info *ti; \
- asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
- ti->task; \
-})
-
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-extern unsigned long boot_option_idle_override;
-/* Boot loader type from the setup header */
-extern int bootloader_type;
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
-#endif /* __ASM_X86_64_PROCESSOR_H */
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
deleted file mode 100644
index 31f20ad..0000000
--- a/include/asm-x86_64/proto.h
+++ /dev/null
@@ -1,104 +0,0 @@
-#ifndef _ASM_X8664_PROTO_H
-#define _ASM_X8664_PROTO_H 1
-
-#include <asm/ldt.h>
-
-/* misc architecture specific prototypes */
-
-struct cpuinfo_x86;
-struct pt_regs;
-
-extern void start_kernel(void);
-extern void pda_init(int);
-
-extern void early_idt_handler(void);
-
-extern void mcheck_init(struct cpuinfo_x86 *c);
-extern void init_memory_mapping(unsigned long start, unsigned long end);
-
-extern void system_call(void);
-extern int kernel_syscall(void);
-extern void syscall_init(void);
-
-extern void ia32_syscall(void);
-extern void ia32_cstar_target(void);
-extern void ia32_sysenter_target(void);
-
-extern void config_acpi_tables(void);
-extern void ia32_syscall(void);
-
-extern int pmtimer_mark_offset(void);
-extern void pmtimer_resume(void);
-extern void pmtimer_wait(unsigned);
-extern unsigned int do_gettimeoffset_pm(void);
-#ifdef CONFIG_X86_PM_TIMER
-extern u32 pmtmr_ioport;
-#else
-#define pmtmr_ioport 0
-#endif
-extern int nohpet;
-
-extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
-
-extern void early_identify_cpu(struct cpuinfo_x86 *c);
-
-extern int k8_scan_nodes(unsigned long start, unsigned long end);
-
-extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
-extern unsigned long numa_free_all_bootmem(void);
-
-extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
-
-extern void load_gs_index(unsigned gs);
-
-extern void stop_timer_interrupt(void);
-extern void main_timer_handler(void);
-
-extern unsigned long end_pfn_map;
-
-extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
-extern void show_registers(struct pt_regs *regs);
-
-extern void exception_table_check(void);
-
-extern void acpi_reserve_bootmem(void);
-
-extern void swap_low_mappings(void);
-
-extern void __show_regs(struct pt_regs * regs);
-extern void show_regs(struct pt_regs * regs);
-
-extern void syscall32_cpu_init(void);
-
-extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
-
-extern void early_quirks(void);
-extern void check_efer(void);
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
-
-extern unsigned long table_start, table_end;
-
-extern int exception_trace;
-extern unsigned cpu_khz;
-extern unsigned tsc_khz;
-
-extern int reboot_force;
-extern int notsc_setup(char *);
-
-extern int timer_over_8254;
-
-extern int gsi_irq_sharing(int gsi);
-
-extern void smp_local_timer_interrupt(void);
-
-extern int force_mwait;
-
-long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-
-void i8254_timer_resume(void);
-
-#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
-#define round_down(x,y) ((x) & ~((y)-1))
-
-#endif
diff --git a/include/asm-x86_64/ptrace-abi.h b/include/asm-x86_64/ptrace-abi.h
deleted file mode 100644
index 19184b0..0000000
--- a/include/asm-x86_64/ptrace-abi.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _X86_64_PTRACE_ABI_H
-#define _X86_64_PTRACE_ABI_H
-
-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
-#define R15 0
-#define R14 8
-#define R13 16
-#define R12 24
-#define RBP 32
-#define RBX 40
-/* arguments: interrupts/non-tracing syscalls only save up to here */
-#define R11 48
-#define R10 56
-#define R9 64
-#define R8 72
-#define RAX 80
-#define RCX 88
-#define RDX 96
-#define RSI 104
-#define RDI 112
-#define ORIG_RAX 120 /* = ERROR */
-/* end of arguments */
-/* cpu exception frame or undefined in case of fast syscall. */
-#define RIP 128
-#define CS 136
-#define EFLAGS 144
-#define RSP 152
-#define SS 160
-#define ARGOFFSET R11
-#endif /* __ASSEMBLY__ */
-
-/* top of stack page */
-#define FRAME_SIZE 168
-
-#define PTRACE_OLDSETOPTIONS 21
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-/* only useful for access 32bit programs */
-#define PTRACE_GET_THREAD_AREA 25
-#define PTRACE_SET_THREAD_AREA 26
-
-#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
-
-#endif
diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h
deleted file mode 100644
index 7f166cc..0000000
--- a/include/asm-x86_64/ptrace.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _X86_64_PTRACE_H
-#define _X86_64_PTRACE_H
-
-#include <linux/compiler.h> /* For __user */
-#include <asm/ptrace-abi.h>
-
-#ifndef __ASSEMBLY__
-
-struct pt_regs {
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
-/* arguments: non-interrupt/non-tracing syscalls only save up to here */
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-/* top of stack page */
-};
-
-#endif
-
-#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-#define user_mode(regs) (!!((regs)->cs & 3))
-#define user_mode_vm(regs) user_mode(regs)
-#define instruction_pointer(regs) ((regs)->rip)
-#define regs_return_value(regs) ((regs)->rax)
-
-extern unsigned long profile_pc(struct pt_regs *regs);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-
-struct task_struct;
-
-extern unsigned long
-convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
-
-enum {
- EF_CF = 0x00000001,
- EF_PF = 0x00000004,
- EF_AF = 0x00000010,
- EF_ZF = 0x00000040,
- EF_SF = 0x00000080,
- EF_TF = 0x00000100,
- EF_IE = 0x00000200,
- EF_DF = 0x00000400,
- EF_OF = 0x00000800,
- EF_IOPL = 0x00003000,
- EF_IOPL_RING0 = 0x00000000,
- EF_IOPL_RING1 = 0x00001000,
- EF_IOPL_RING2 = 0x00002000,
- EF_NT = 0x00004000, /* nested task */
- EF_RF = 0x00010000, /* resume */
- EF_VM = 0x00020000, /* virtual mode */
- EF_AC = 0x00040000, /* alignment */
- EF_VIF = 0x00080000, /* virtual interrupt */
- EF_VIP = 0x00100000, /* virtual interrupt pending */
- EF_ID = 0x00200000, /* id */
-};
-
-#endif
-
-#endif
diff --git a/include/asm-x86_64/required-features.h b/include/asm-x86_64/required-features.h
deleted file mode 100644
index e80d576..0000000
--- a/include/asm-x86_64/required-features.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _ASM_REQUIRED_FEATURES_H
-#define _ASM_REQUIRED_FEATURES_H 1
-
-/* Define the minimum CPUID feature set for the kernel. These bits are checked
-   really early to actually display a visible error message before the
-   kernel dies.  Make sure to assign features to the proper mask!
-
-   The real information is in arch/x86_64/Kconfig.cpu; this just converts
-   the CONFIGs into a bitmask. */
-
-/* x86-64 baseline features */
-#define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
-#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
-#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
-#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
-#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
-#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
-#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
-#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
-#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
-
-#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
- NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
- NEED_XMM|NEED_XMM2)
-#define SSE_MASK (NEED_XMM|NEED_XMM2)
-
-/* x86-64 extended (CPUID 0x80000001) features */
-#define NEED_LM (1<<(X86_FEATURE_LM & 31))
-
-#ifdef CONFIG_X86_USE_3DNOW
-# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
-#else
-# define NEED_3DNOW 0
-#endif
-
-#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
-
-#define REQUIRED_MASK2 0
-#define REQUIRED_MASK3 0
-#define REQUIRED_MASK4 0
-#define REQUIRED_MASK5 0
-#define REQUIRED_MASK6 0
-#define REQUIRED_MASK7 0
-
-#endif
diff --git a/include/asm-x86_64/resource.h b/include/asm-x86_64/resource.h
deleted file mode 100644
index f40b406..0000000
--- a/include/asm-x86_64/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _X8664_RESOURCE_H
-#define _X8664_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/include/asm-x86_64/resume-trace.h b/include/asm-x86_64/resume-trace.h
deleted file mode 100644
index 34bf998..0000000
--- a/include/asm-x86_64/resume-trace.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#define TRACE_RESUME(user) do { \
- if (pm_trace_enabled) { \
- void *tracedata; \
- asm volatile("movq $1f,%0\n" \
- ".section .tracedata,\"a\"\n" \
- "1:\t.word %c1\n" \
- "\t.quad %c2\n" \
- ".previous" \
- :"=r" (tracedata) \
- : "i" (__LINE__), "i" (__FILE__)); \
- generate_resume_trace(tracedata, user); \
- } \
-} while (0)
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
deleted file mode 100644
index c7350f6..0000000
--- a/include/asm-x86_64/rio.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Derived from include/asm-i386/mach-summit/mach_mpparse.h
- * and include/asm-i386/mach-default/bios_ebda.h
- *
- * Author: Laurent Vivier <Laurent.Vivier@bull.net>
- */
-
-#ifndef __ASM_RIO_H
-#define __ASM_RIO_H
-
-#define RIO_TABLE_VERSION 3
-
-struct rio_table_hdr {
- u8 version; /* Version number of this data structure */
- u8 num_scal_dev; /* # of Scalability devices */
- u8 num_rio_dev; /* # of RIO I/O devices */
-} __attribute__((packed));
-
-struct scal_detail {
- u8 node_id; /* Scalability Node ID */
- u32 CBAR; /* Address of 1MB register space */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF = None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port2node; /* Node ID port connected to: 0xFF = None */
- u8 port2port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
-} __attribute__((packed));
-
-struct rio_detail {
- u8 node_id; /* RIO Node ID */
- u32 BBAR; /* Address of 1MB register space */
- u8 type; /* Type of device */
- u8 owner_id; /* Node ID of Hurricane that owns this */
- /* node */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF=None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 first_slot; /* Lowest slot number below this Calgary */
- u8 status; /* Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie: */
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- u8 WP_index; /* instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- u8 chassis_num; /* 1 based Chassis number */
-} __attribute__((packed));
-
-enum {
- HURR_SCALABILTY = 0, /* Hurricane Scalability info */
- HURR_RIOIB = 2, /* Hurricane RIOIB info */
- COMPAT_CALGARY = 4, /* Compatibility Calgary */
- ALT_CALGARY = 5, /* Second Planar Calgary */
-};
-
-/*
- * there is a real-mode segmented pointer pointing to the
- * 4K EBDA area at 0x40E.
- */
-static inline unsigned long get_bios_ebda(void)
-{
- unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
- address <<= 4;
- return address;
-}
-
-#endif /* __ASM_RIO_H */
diff --git a/include/asm-x86_64/rtc.h b/include/asm-x86_64/rtc.h
deleted file mode 100644
index 18ed713a..0000000
--- a/include/asm-x86_64/rtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _X86_64_RTC_H
-#define _X86_64_RTC_H
-
-/*
- * x86 uses the default access methods for the RTC.
- */
-
-#include <asm-generic/rtc.h>
-
-#endif
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
deleted file mode 100644
index 72aeebe..0000000
--- a/include/asm-x86_64/rwlock.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* include/asm-x86_64/rwlock.h
- *
- * Helpers used by both rw spinlocks and rw semaphores.
- *
- * Based in part on code from semaphore.h and
- * spinlock.h Copyright 1996 Linus Torvalds.
- *
- * Copyright 1999 Red Hat, Inc.
- * Copyright 2001,2002 SuSE labs
- *
- * Written by Benjamin LaHaise.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_X86_64_RWLOCK_H
-#define _ASM_X86_64_RWLOCK_H
-
-#define RW_LOCK_BIAS 0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-/* Actual code is in asm/spinlock.h or in arch/x86_64/lib/rwlock.S */
-
-#endif
diff --git a/include/asm-x86_64/scatterlist.h b/include/asm-x86_64/scatterlist.h
deleted file mode 100644
index eaf7ada..0000000
--- a/include/asm-x86_64/scatterlist.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _X8664_SCATTERLIST_H
-#define _X8664_SCATTERLIST_H
-
-#include <asm/types.h>
-
-struct scatterlist {
- struct page *page;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
- unsigned int dma_length;
-};
-
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->dma_length)
-
-#endif
diff --git a/include/asm-x86_64/seccomp.h b/include/asm-x86_64/seccomp.h
deleted file mode 100644
index 553af65..0000000
--- a/include/asm-x86_64/seccomp.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_SECCOMP_H
-
-#include <linux/thread_info.h>
-
-#ifdef TIF_32BIT
-#error "unexpected TIF_32BIT on x86_64"
-#else
-#define TIF_32BIT TIF_IA32
-#endif
-
-#include <linux/unistd.h>
-#include <asm/ia32_unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_ia32_read
-#define __NR_seccomp_write_32 __NR_ia32_write
-#define __NR_seccomp_exit_32 __NR_ia32_exit
-#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
-
-#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-x86_64/sections.h b/include/asm-x86_64/sections.h
deleted file mode 100644
index c746d9f..0000000
--- a/include/asm-x86_64/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _X8664_SECTIONS_H
-#define _X8664_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
deleted file mode 100644
index 04b8ab2..0000000
--- a/include/asm-x86_64/segment.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-#include <asm/cache.h>
-
-/* Simple and small GDT entries for booting only */
-
-#define GDT_ENTRY_BOOT_CS 2
-#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
-
-#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
-#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
-
-#define __KERNEL_CS 0x10
-#define __KERNEL_DS 0x18
-
-#define __KERNEL32_CS 0x08
-
-/*
- * we cannot use the same code segment descriptor for user and kernel
- * -- not even in the long flat mode, because of different DPL /kkeil
- * The segment offset needs to contain an RPL. Grr. -AK
- * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
- */
-
-#define __USER32_CS 0x23 /* 4*8+3 */
-#define __USER_DS 0x2b /* 5*8+3 */
-#define __USER_CS 0x33 /* 6*8+3 */
-#define __USER32_DS __USER_DS
-
-#define GDT_ENTRY_TSS 8 /* needs two entries */
-#define GDT_ENTRY_LDT 10 /* needs two entries */
-#define GDT_ENTRY_TLS_MIN 12
-#define GDT_ENTRY_TLS_MAX 14
-
-#define GDT_ENTRY_TLS_ENTRIES 3
-
-#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
-#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
-
-/* TLS indexes for 64bit - hardcoded in arch_prctl */
-#define FS_TLS 0
-#define GS_TLS 1
-
-#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
-#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
-
-#define IDT_ENTRIES 256
-#define GDT_ENTRIES 16
-#define GDT_SIZE (GDT_ENTRIES * 8)
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-#endif
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
deleted file mode 100644
index 1194888..0000000
--- a/include/asm-x86_64/semaphore.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef _X86_64_SEMAPHORE_H
-#define _X86_64_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- * the original code and to make semaphore waits
- * interruptible so that processes waiting on
- * semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- *			functions in asm/semaphore-helper.h while fixing a
- * potential and subtle race discovered by Ulrich Schmid
- * in down_interruptible(). Since I started to play here I
- * also implemented the `trylock' semaphore operation.
- * 1999-07-02 Artur Skawina <skawina@geocities.com>
- * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- * do this). Changed calling sequences from push/jmp to
- * traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- * Some hacks to ensure compatibility with recent
- * GCC snapshots, to avoid stack corruption when compiling
- *			with -fomit-frame-pointer. It's not clear whether this will
- * be fixed in GCC, as our previous implementation was a
- * bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * I'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/x86_64/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- __asm__ __volatile__(
- "# atomic down operation\n\t"
- LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "jns 1f\n\t"
- "call __down_failed\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int result;
-
- might_sleep();
-
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_interruptible\n"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory");
- return result;
-}
-
-/*
- * Attempt to down() a semaphore without blocking.
- * Returns zero if we acquired it.
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- int result;
-
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_trylock\n\t"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory","cc");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__(
- "# atomic up operation\n\t"
- LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jg 1f\n\t"
- "call __up_wakeup\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
-}
-#endif /* __KERNEL__ */
-#endif
diff --git a/include/asm-x86_64/sembuf.h b/include/asm-x86_64/sembuf.h
deleted file mode 100644
index 63b5292..0000000
--- a/include/asm-x86_64/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _X86_64_SEMBUF_H
-#define _X86_64_SEMBUF_H
-
-/*
- * The semid64_ds structure for x86_64 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _X86_64_SEMBUF_H */
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
deleted file mode 100644
index b0496e0..0000000
--- a/include/asm-x86_64/serial.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * include/asm-x86_64/serial.h
- */
-
-
-/*
- * This assumes you have a 1.8432 MHz clock for your UART.
- *
- * It'd be nice if someone built a serial card with a 24.576 MHz
- * clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
- */
-#define BASE_BAUD ( 1843200 / 16 )
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86_64/setup.h b/include/asm-x86_64/setup.h
deleted file mode 100644
index eaeff73..0000000
--- a/include/asm-x86_64/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _x8664_SETUP_H
-#define _x8664_SETUP_H
-
-#define COMMAND_LINE_SIZE 2048
-
-#endif
diff --git a/include/asm-x86_64/shmbuf.h b/include/asm-x86_64/shmbuf.h
deleted file mode 100644
index 5a6d6dd..0000000
--- a/include/asm-x86_64/shmbuf.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _X8664_SHMBUF_H
-#define _X8664_SHMBUF_H
-
-/*
- * The shmid64_ds structure for x8664 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 2 miscellaneous 64-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif
diff --git a/include/asm-x86_64/shmparam.h b/include/asm-x86_64/shmparam.h
deleted file mode 100644
index d702162..0000000
--- a/include/asm-x86_64/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASMX8664_SHMPARAM_H
-#define _ASMX8664_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _ASMX8664_SHMPARAM_H */
diff --git a/include/asm-x86_64/sigcontext.h b/include/asm-x86_64/sigcontext.h
deleted file mode 100644
index b4e4023..0000000
--- a/include/asm-x86_64/sigcontext.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_64_SIGCONTEXT_H
-#define _ASM_X86_64_SIGCONTEXT_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-/* FXSAVE frame */
-/* Note: reserved1/2 may someday contain valuable data. Always save/restore
- them when you change signal frames. */
-struct _fpstate {
- __u16 cwd;
- __u16 swd;
- __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
- __u16 fop;
- __u64 rip;
- __u64 rdp;
- __u32 mxcsr;
- __u32 mxcsr_mask;
- __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
- __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
- __u32 reserved2[24];
-};
-
-struct sigcontext {
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long rdi;
- unsigned long rsi;
- unsigned long rbp;
- unsigned long rbx;
- unsigned long rdx;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rsp;
- unsigned long rip;
- unsigned long eflags; /* RFLAGS */
- unsigned short cs;
- unsigned short gs;
- unsigned short fs;
- unsigned short __pad0;
- unsigned long err;
- unsigned long trapno;
- unsigned long oldmask;
- unsigned long cr2;
- struct _fpstate __user *fpstate; /* zero when no FPU context */
- unsigned long reserved1[8];
-};
-
-#endif
diff --git a/include/asm-x86_64/sigcontext32.h b/include/asm-x86_64/sigcontext32.h
deleted file mode 100644
index 3d65703..0000000
--- a/include/asm-x86_64/sigcontext32.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef _SIGCONTEXT32_H
-#define _SIGCONTEXT32_H 1
-
-/* signal context for 32bit programs. */
-
-#define X86_FXSR_MAGIC 0x0000
-
-struct _fpreg {
- unsigned short significand[4];
- unsigned short exponent;
-};
-
-struct _fpxreg {
- unsigned short significand[4];
- unsigned short exponent;
- unsigned short padding[3];
-};
-
-struct _xmmreg {
- __u32 element[4];
-};
-
-/* FSAVE frame with extensions */
-struct _fpstate_ia32 {
- /* Regular FPU environment */
- __u32 cw;
- __u32 sw;
- __u32 tag; /* not compatible to 64bit twd */
- __u32 ipoff;
- __u32 cssel;
- __u32 dataoff;
- __u32 datasel;
- struct _fpreg _st[8];
- unsigned short status;
- unsigned short magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- __u32 _fxsr_env[6];
- __u32 mxcsr;
- __u32 reserved;
- struct _fpxreg _fxsr_st[8];
- struct _xmmreg _xmm[8]; /* It's actually 16 */
- __u32 padding[56];
-};
-
-struct sigcontext_ia32 {
- unsigned short gs, __gsh;
- unsigned short fs, __fsh;
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned int edi;
- unsigned int esi;
- unsigned int ebp;
- unsigned int esp;
- unsigned int ebx;
- unsigned int edx;
- unsigned int ecx;
- unsigned int eax;
- unsigned int trapno;
- unsigned int err;
- unsigned int eip;
- unsigned short cs, __csh;
- unsigned int eflags;
- unsigned int esp_at_signal;
- unsigned short ss, __ssh;
- unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
- unsigned int oldmask;
- unsigned int cr2;
-};
-
-#endif
diff --git a/include/asm-x86_64/siginfo.h b/include/asm-x86_64/siginfo.h
deleted file mode 100644
index d09a1e6..0000000
--- a/include/asm-x86_64/siginfo.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _X8664_SIGINFO_H
-#define _X8664_SIGINFO_H
-
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
deleted file mode 100644
index 4581f97..0000000
--- a/include/asm-x86_64/signal.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef _ASMx8664_SIGNAL_H
-#define _ASMx8664_SIGNAL_H
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/time.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
-#include <linux/linkage.h>
-/* Most things should be clean enough to redefine this at will, if care
- is taken to make libc match. */
-
-#define _NSIG 64
-#define _NSIG_BPW 64
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-#endif
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal.h>
-
-#ifndef __ASSEMBLY__
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
-#include <asm/sigcontext.h>
-
-#undef __HAVE_ARCH_SIG_BITOPS
-#if 0
-
-static inline void sigaddset(sigset_t *set, int _sig)
-{
- __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
-}
-
-static inline void sigdelset(sigset_t *set, int _sig)
-{
- __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
-}
-
-static inline int __const_sigismember(sigset_t *set, int _sig)
-{
- unsigned long sig = _sig - 1;
- return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
-}
-
-static inline int __gen_sigismember(sigset_t *set, int _sig)
-{
- int ret;
- __asm__("btq %2,%1\n\tsbbq %0,%0"
- : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
- return ret;
-}
-
-#define sigismember(set,sig) \
- (__builtin_constant_p(sig) ? \
- __const_sigismember((set),(sig)) : \
- __gen_sigismember((set),(sig)))
-
-static inline int sigfindinword(unsigned long word)
-{
- __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
- return word;
-}
-#endif
-#endif
-
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
deleted file mode 100644
index 3f303d2..0000000
--- a/include/asm-x86_64/smp.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-extern int disable_apic;
-
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_SMP
-
-#include <asm/pda.h>
-
-struct pt_regs;
-
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-
-/*
- * Private routines/data
- */
-
-extern void smp_alloc_memory(void);
-extern volatile unsigned long smp_invalidate_needed;
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
-extern int smp_num_siblings;
-extern void smp_send_reschedule(int cpu);
-
-extern cpumask_t cpu_sibling_map[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
-extern u8 cpu_llc_id[NR_CPUS];
-
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-
-static inline int num_booting_cpus(void)
-{
- return cpus_weight(cpu_callout_map);
-}
-
-#define raw_smp_processor_id() read_pda(cpunumber)
-
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern void prefill_possible_map(void);
-extern unsigned num_processors;
-extern unsigned __cpuinitdata disabled_cpus;
-
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
-
-/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
-extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
-extern u8 x86_cpu_to_log_apicid[NR_CPUS];
-extern u8 bios_cpu_apicid[];
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < NR_CPUS)
- return (int)bios_cpu_apicid[mps_cpu];
- else
- return BAD_APICID;
-}
-
-#ifndef CONFIG_SMP
-#define stack_smp_processor_id() 0
-#define cpu_logical_map(x) (x)
-#else
-#include <asm/thread_info.h>
-#define stack_smp_processor_id() \
-({ \
- struct thread_info *ti; \
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
- ti->cpu; \
-})
-#endif
-
-static __inline int logical_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-}
-
-#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
-#else
-#define cpu_physical_id(cpu) boot_cpu_id
-#endif /* !CONFIG_SMP */
-#endif
-
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
deleted file mode 100644
index 90af60c..0000000
--- a/include/asm-x86_64/socket.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-x86_64/sockios.h b/include/asm-x86_64/sockios.h
deleted file mode 100644
index d726ba25..0000000
--- a/include/asm-x86_64/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_X8664_SOCKIOS__
-#define __ARCH_X8664_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif
diff --git a/include/asm-x86_64/sparsemem.h b/include/asm-x86_64/sparsemem.h
deleted file mode 100644
index dabb167..0000000
--- a/include/asm-x86_64/sparsemem.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _ASM_X86_64_SPARSEMEM_H
-#define _ASM_X86_64_SPARSEMEM_H 1
-
-#ifdef CONFIG_SPARSEMEM
-
-/*
- * generic non-linear memory support:
- *
- * 1) we will not split memory into more chunks than will fit into the flags
- * field of the struct page
- *
- * SECTION_SIZE_BITS 2^n: size of each section
- * MAX_PHYSADDR_BITS 2^n: max size of physical address space
- * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
- *
- */
-
-#define SECTION_SIZE_BITS 27 /* 2^27 = 128 MB sections are convenient right now */
-#define MAX_PHYSADDR_BITS 40
-#define MAX_PHYSMEM_BITS 40
-
-extern int early_pfn_to_nid(unsigned long pfn);
-
-#endif /* CONFIG_SPARSEMEM */
-
-#endif /* _ASM_X86_64_SPARSEMEM_H */
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
deleted file mode 100644
index 88bf981..0000000
--- a/include/asm-x86_64/spinlock.h
+++ /dev/null
@@ -1,167 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * Simple spin lock operations.  There are two variants: one clears IRQs
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
-{
- return *(volatile signed int *)(&(lock)->slock) <= 0;
-}
-
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
-{
- asm volatile(
- "\n1:\t"
- LOCK_PREFIX " ; decl %0\n\t"
- "jns 2f\n"
- "3:\n"
- "rep;nop\n\t"
- "cmpl $0,%0\n\t"
- "jle 3b\n\t"
- "jmp 1b\n"
- "2:\t" : "=m" (lock->slock) : : "memory");
-}
-
-/*
- * Same as __raw_spin_lock, but reenable interrupts during spinning.
- */
-#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-{
- asm volatile(
- "\n1:\t"
- LOCK_PREFIX " ; decl %0\n\t"
- "jns 5f\n"
- "testl $0x200, %1\n\t" /* interrupts were disabled? */
- "jz 4f\n\t"
- "sti\n"
- "3:\t"
- "rep;nop\n\t"
- "cmpl $0, %0\n\t"
- "jle 3b\n\t"
- "cli\n\t"
- "jmp 1b\n"
- "4:\t"
- "rep;nop\n\t"
- "cmpl $0, %0\n\t"
- "jg 1b\n\t"
- "jmp 4b\n"
- "5:\n\t"
- : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
-}
-#endif
-
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-{
- int oldval;
-
- asm volatile(
- "xchgl %0,%1"
- :"=q" (oldval), "=m" (lock->slock)
- :"0" (0) : "memory");
-
- return oldval > 0;
-}
-
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-{
- asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
-}
-
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
-{
- while (__raw_spin_is_locked(lock))
- cpu_relax();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! It is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- */
-
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
-{
- return (int)(lock)->lock > 0;
-}
-
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
-{
- return (lock)->lock == RW_LOCK_BIAS;
-}
-
-static inline void __raw_read_lock(raw_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
- "jns 1f\n"
- "call __read_lock_failed\n"
- "1:\n"
- ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
-}
-
-static inline void __raw_write_lock(raw_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
- "jz 1f\n"
- "\tcall __write_lock_failed\n\t"
- "1:\n"
- ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
-}
-
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)lock;
- atomic_dec(count);
- if (atomic_read(count) >= 0)
- return 1;
- atomic_inc(count);
- return 0;
-}
-
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)lock;
- if (atomic_sub_and_test(RW_LOCK_BIAS, count))
- return 1;
- atomic_add(RW_LOCK_BIAS, count);
- return 0;
-}
-
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
-}
-
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
- : "=m" (rw->lock) : : "memory");
-}
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
-
-#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
deleted file mode 100644
index 4da9345..0000000
--- a/include/asm-x86_64/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_SPINLOCK_TYPES_H
-#define __ASM_SPINLOCK_TYPES_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-typedef struct {
- unsigned int slock;
-} raw_spinlock_t;
-
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
-
-typedef struct {
- unsigned int lock;
-} raw_rwlock_t;
-
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
-
-#endif
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
deleted file mode 100644
index 6f0b545..0000000
--- a/include/asm-x86_64/stacktrace.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_STACKTRACE_H
-#define _ASM_STACKTRACE_H 1
-
-extern int kstack_depth_to_print;
-
-/* Generic stack tracer with callbacks */
-
-struct stacktrace_ops {
- void (*warning)(void *data, char *msg);
- /* msg must contain %s for the symbol */
- void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
- void (*address)(void *data, unsigned long address);
- /* On negative return stop dumping */
- int (*stack)(void *data, char *name);
-};
-
-void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
- struct stacktrace_ops *ops, void *data);
-
-#endif
diff --git a/include/asm-x86_64/stat.h b/include/asm-x86_64/stat.h
deleted file mode 100644
index fd9f00d..0000000
--- a/include/asm-x86_64/stat.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _ASM_X86_64_STAT_H
-#define _ASM_X86_64_STAT_H
-
-#define STAT_HAVE_NSEC 1
-
-struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
- long st_blocks; /* Number 512-byte blocks allocated. */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
-};
-
-/* For 32bit emulation */
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned int st_size;
- unsigned int st_atime;
- unsigned int st_mtime;
- unsigned int st_ctime;
-};
-
-#endif
diff --git a/include/asm-x86_64/statfs.h b/include/asm-x86_64/statfs.h
deleted file mode 100644
index b3f4718..0000000
--- a/include/asm-x86_64/statfs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _X86_64_STATFS_H
-#define _X86_64_STATFS_H
-
-#ifndef __KERNEL_STRICT_NAMES
-
-#include <linux/types.h>
-
-typedef __kernel_fsid_t fsid_t;
-
-#endif
-
-/*
- * This is ugly -- we're already 64-bit clean, so just duplicate the
- * definitions.
- */
-struct statfs {
- long f_type;
- long f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
- __kernel_fsid_t f_fsid;
- long f_namelen;
- long f_frsize;
- long f_spare[5];
-};
-
-struct statfs64 {
- long f_type;
- long f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
- __kernel_fsid_t f_fsid;
- long f_namelen;
- long f_frsize;
- long f_spare[5];
-};
-
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_spare[5];
-} __attribute__((packed));
-
-#endif
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
deleted file mode 100644
index e583da7..0000000
--- a/include/asm-x86_64/string.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _X86_64_STRING_H_
-#define _X86_64_STRING_H_
-
-#ifdef __KERNEL__
-
-/* Written 2002 by Andi Kleen */
-
-/* Only used for special circumstances. Stolen from i386/string.h */
-static __always_inline void *
-__inline_memcpy(void * to, const void * from, size_t n)
-{
-unsigned long d0, d1, d2;
-__asm__ __volatile__(
- "rep ; movsl\n\t"
- "testb $2,%b4\n\t"
- "je 1f\n\t"
- "movsw\n"
- "1:\ttestb $1,%b4\n\t"
- "je 2f\n\t"
- "movsb\n"
- "2:"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
- : "memory");
-return (to);
-}
-
-/* Even with __builtin_ the compiler may decide to use the out-of-line
- function. */
-
-#define __HAVE_ARCH_MEMCPY 1
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-extern void *memcpy(void *to, const void *from, size_t len);
-#else
-extern void *__memcpy(void *to, const void *from, size_t len);
-#define memcpy(dst,src,len) \
- ({ size_t __len = (len); \
- void *__ret; \
- if (__builtin_constant_p(len) && __len >= 64) \
- __ret = __memcpy((dst),(src),__len); \
- else \
- __ret = __builtin_memcpy((dst),(src),__len); \
- __ret; })
-#endif
-
-#define __HAVE_ARCH_MEMSET
-void *memset(void *s, int c, size_t n);
-
-#define __HAVE_ARCH_MEMMOVE
-void * memmove(void * dest,const void *src,size_t count);
-
-int memcmp(const void * cs,const void * ct,size_t count);
-size_t strlen(const char * s);
-char *strcpy(char * dest,const char *src);
-char *strcat(char * dest, const char * src);
-int strcmp(const char * cs,const char * ct);
-
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
deleted file mode 100644
index b897e8c..0000000
--- a/include/asm-x86_64/suspend.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
- * Based on code
- * Copyright 2001 Patrick Mochel <mochel@osdl.org>
- */
-#include <asm/desc.h>
-#include <asm/i387.h>
-
-static inline int
-arch_prepare_suspend(void)
-{
- return 0;
-}
-
-/* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */
-struct saved_context {
- u16 ds, es, fs, gs, ss;
- unsigned long gs_base, gs_kernel_base, fs_base;
- unsigned long cr0, cr2, cr3, cr4, cr8;
- unsigned long efer;
- u16 gdt_pad;
- u16 gdt_limit;
- unsigned long gdt_base;
- u16 idt_pad;
- u16 idt_limit;
- unsigned long idt_base;
- u16 ldt;
- u16 tss;
- unsigned long tr;
- unsigned long safety;
- unsigned long return_address;
- unsigned long eflags;
-} __attribute__((packed));
-
-/* We'll access these from assembly, so we'd better have them outside struct */
-extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
-extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
-extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
-extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
-extern unsigned long saved_context_eflags;
-
-#define loaddebug(thread,register) \
- set_debugreg((thread)->debugreg##register, register)
-
-extern void fix_processor_context(void);
-
-extern unsigned long saved_rip;
-extern unsigned long saved_rsp;
-extern unsigned long saved_rbp;
-extern unsigned long saved_rbx;
-extern unsigned long saved_rsi;
-extern unsigned long saved_rdi;
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
deleted file mode 100644
index f9c5895..0000000
--- a/include/asm-x86_64/swiotlb.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef _ASM_SWIOTLB_H
-#define _ASM_SWIOTLB_H 1
-
-#include <asm/dma-mapping.h>
-
-/* SWIOTLB interface */
-
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
- size_t size, int dir);
-extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
-extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir);
-extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t dev_addr,
- size_t size, int dir);
-extern void swiotlb_sync_single_for_device(struct device *hwdev,
- dma_addr_t dev_addr,
- size_t size, int dir);
-extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
- dma_addr_t dev_addr,
- unsigned long offset,
- size_t size, int dir);
-extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
- dma_addr_t dev_addr,
- unsigned long offset,
- size_t size, int dir);
-extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int dir);
-extern void swiotlb_sync_sg_for_device(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int dir);
-extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
-extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-extern void swiotlb_init(void);
-
-extern int swiotlb_force;
-
-#ifdef CONFIG_SWIOTLB
-extern int swiotlb;
-#else
-#define swiotlb 0
-#endif
-
-extern void pci_swiotlb_init(void);
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
-#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
deleted file mode 100644
index 02175aa..0000000
--- a/include/asm-x86_64/system.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-#ifdef __KERNEL__
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
-#define __EXTRA_CLOBBER \
- ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
-
-/* Save and restore flags to clear and handle a leaking NT flag */
-#define switch_to(prev,next,last) \
- asm volatile(SAVE_CONTEXT \
- "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
- "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
- "call __switch_to\n\t" \
- ".globl thread_return\n" \
- "thread_return:\n\t" \
- "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
- "movq %P[thread_info](%%rsi),%%r8\n\t" \
- LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
- "movq %%rax,%%rdi\n\t" \
- "jc ret_from_fork\n\t" \
- RESTORE_CONTEXT \
- : "=a" (last) \
- : [next] "S" (next), [prev] "D" (prev), \
- [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
- [ti_flags] "i" (offsetof(struct thread_info, flags)),\
- [tif_fork] "i" (TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, stack)), \
- [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
- : "memory", "cc" __EXTRA_CLOBBER)
-
-extern void load_gs_index(unsigned);
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %k0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "movl %1,%%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 8\n\t" \
- ".quad 1b,3b\n" \
- ".previous" \
- : :"r" (value), "r" (0))
-
-/*
- * Clear and set the 'TS' bit, respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-
-static inline unsigned long read_cr0(void)
-{
- unsigned long cr0;
- asm volatile("movq %%cr0,%0" : "=r" (cr0));
- return cr0;
-}
-
-static inline void write_cr0(unsigned long val)
-{
- asm volatile("movq %0,%%cr0" :: "r" (val));
-}
-
-static inline unsigned long read_cr2(void)
-{
- unsigned long cr2;
- asm("movq %%cr2,%0" : "=r" (cr2));
- return cr2;
-}
-
-static inline void write_cr2(unsigned long val)
-{
- asm volatile("movq %0,%%cr2" :: "r" (val));
-}
-
-static inline unsigned long read_cr3(void)
-{
- unsigned long cr3;
- asm("movq %%cr3,%0" : "=r" (cr3));
- return cr3;
-}
-
-static inline void write_cr3(unsigned long val)
-{
- asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
-}
-
-static inline unsigned long read_cr4(void)
-{
- unsigned long cr4;
- asm("movq %%cr4,%0" : "=r" (cr4));
- return cr4;
-}
-
-static inline void write_cr4(unsigned long val)
-{
- asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
-}
-
-static inline unsigned long read_cr8(void)
-{
- unsigned long cr8;
- asm("movq %%cr8,%0" : "=r" (cr8));
- return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
- asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#define stts() write_cr0(8 | read_cr0())
-
-#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory")
-
-#endif /* __KERNEL__ */
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() do {} while(0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#endif
-
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-
-#ifdef CONFIG_UNORDERED_IO
-#define wmb() asm volatile("sfence" ::: "memory")
-#else
-#define wmb() asm volatile("" ::: "memory")
-#endif
-#define read_barrier_depends() do {} while(0)
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-
-#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
-#include <linux/irqflags.h>
-
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-#endif
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
deleted file mode 100644
index cd955d3..0000000
--- a/include/asm-x86_64/tce.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is derived from asm-powerpc/tce.h.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
- * Author: Jon Mason <jdmason@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_X86_64_TCE_H
-#define _ASM_X86_64_TCE_H
-
-extern unsigned int specified_table_size;
-struct iommu_table;
-
-#define TCE_ENTRY_SIZE 8 /* in bytes */
-
-#define TCE_READ_SHIFT 0
-#define TCE_WRITE_SHIFT 1
-#define TCE_HUBID_SHIFT 2 /* unused */
-#define TCE_RSVD_SHIFT 8 /* unused */
-#define TCE_RPN_SHIFT 12
-#define TCE_UNUSED_SHIFT 48 /* unused */
-
-#define TCE_RPN_MASK 0x0000fffffffff000ULL
-
-extern void tce_build(struct iommu_table *tbl, unsigned long index,
- unsigned int npages, unsigned long uaddr, int direction);
-extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
-extern void * __init alloc_tce_table(void);
-extern void __init free_tce_table(void *tbl);
-extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
-
-#endif /* _ASM_X86_64_TCE_H */
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
deleted file mode 100644
index 7405756..0000000
--- a/include/asm-x86_64/termbits.h
+++ /dev/null
@@ -1,198 +0,0 @@
-#ifndef __ARCH_X8664_TERMBITS_H__
-#define __ARCH_X8664_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000 /* non standard rate */
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif
diff --git a/include/asm-x86_64/termios.h b/include/asm-x86_64/termios.h
deleted file mode 100644
index 35ee59b..0000000
--- a/include/asm-x86_64/termios.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _X8664_TERMIOS_H
-#define _X8664_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- *(unsigned short *) &(termios)->x = __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __KERNEL__ */
-
-#endif /* _X8664_TERMIOS_H */
diff --git a/include/asm-x86_64/therm_throt.h b/include/asm-x86_64/therm_throt.h
deleted file mode 100644
index 5aac059..0000000
--- a/include/asm-x86_64/therm_throt.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/therm_throt.h>
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
deleted file mode 100644
index beae2bf..0000000
--- a/include/asm-x86_64/thread_info.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* thread_info.h: x86_64 low-level thread information
- *
- * Copyright (C) 2002 David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#ifdef __KERNEL__
-
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/pda.h>
-
-/*
- * low level task data that entry.S needs immediate access to
- * - this struct should fit entirely inside of one cache line
- * - this struct shares the supervisor stack pages
- */
-#ifndef __ASSEMBLY__
-struct task_struct;
-struct exec_domain;
-#include <asm/mmsegment.h>
-
-struct thread_info {
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- __u32 flags; /* low level flags */
- __u32 status; /* thread synchronous flags */
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
-
- mm_segment_t addr_limit;
- struct restart_block restart_block;
-};
-#endif
-
-/*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = 1, \
- .addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
-}
-
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
- return ti;
-}
-
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
-{
- struct thread_info *ti;
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
- return ti;
-}
-
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) \
- ({ \
- struct thread_info *ret; \
- \
- ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
- if (ret) \
- memset(ret, 0, THREAD_SIZE); \
- ret; \
- })
-#else
-#define alloc_thread_info(tsk) \
- ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
-#endif
-
-#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
- movq %gs:pda_kernelstack,reg ; \
- subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
-
-#endif
-
-/*
- * thread information flags
- * - these are process state flags that various assembly files may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
- * Warning: layout of LSW is hardcoded in entry.S
- */
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_SIGPENDING 2 /* signal pending */
-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_IRET 5 /* force IRET */
-#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-#define TIF_SECCOMP 8 /* secure computing */
-#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
-#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
-/* 16 free */
-#define TIF_IA32 17 /* 32bit process */
-#define TIF_FORK 18 /* ret_from_fork */
-#define TIF_ABI_PENDING 19
-#define TIF_MEMDIE 20
-#define TIF_DEBUG 21 /* uses debug registers */
-#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-#define TIF_FREEZE 23 /* is freezing for suspend */
-
-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_IRET (1<<TIF_IRET)
-#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY)
-#define _TIF_IA32 (1<<TIF_IA32)
-#define _TIF_FORK (1<<TIF_FORK)
-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
-#define _TIF_DEBUG (1<<TIF_DEBUG)
-#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
-
-/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK \
- (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
-/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
-
-/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
-
-#define PREEMPT_ACTIVE 0x10000000
-
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-#define TS_COMPAT 0x0002 /* 32bit syscall active */
-#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
-
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
deleted file mode 100644
index 6ed21f4..0000000
--- a/include/asm-x86_64/timex.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * linux/include/asm-x86_64/timex.h
- *
- * x86-64 architecture timex specifications
- */
-#ifndef _ASMx8664_TIMEX_H
-#define _ASMx8664_TIMEX_H
-
-#include <asm/8253pit.h>
-#include <asm/msr.h>
-#include <asm/vsyscall.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/tsc.h>
-#include <linux/compiler.h>
-
-#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
-
-extern int read_current_timer(unsigned long *timer_value);
-#define ARCH_HAS_READ_CURRENT_TIMER 1
-
-#define USEC_PER_TICK (USEC_PER_SEC / HZ)
-#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
-#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
-
-#define NS_SCALE 10 /* 2^10, carefully chosen */
-#define US_SCALE 32 /* 2^32, arbitrarily chosen */
-
-extern void mark_tsc_unstable(char *msg);
-extern void set_cyc2ns_scale(unsigned long khz);
-#endif
diff --git a/include/asm-x86_64/tlb.h b/include/asm-x86_64/tlb.h
deleted file mode 100644
index cd4c3c5..0000000
--- a/include/asm-x86_64/tlb.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef TLB_H
-#define TLB_H 1
-
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-#include <asm-generic/tlb.h>
-
-#endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
deleted file mode 100644
index 888eb4a..0000000
--- a/include/asm-x86_64/tlbflush.h
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef _X8664_TLBFLUSH_H
-#define _X8664_TLBFLUSH_H
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-
-static inline void __flush_tlb(void)
-{
- write_cr3(read_cr3());
-}
-
-static inline void __flush_tlb_all(void)
-{
- unsigned long cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */
- write_cr4(cr4); /* write old PGE again and flush TLBs */
-}
-
-#define __flush_tlb_one(addr) \
- __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
-
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. It might be worth checking whether a few
- * INVLPGs in a row are a win for a small range.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb();
-}
-
-#else
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
- __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-/* Roughly an IPI every 20MB with 4k pages for freeing page table
- ranges. Cost is about 42k of memory for each CPU. */
-#define ARCH_FREE_PTE_NR 5350
-
-#endif
-
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* x86_64 does not keep any page table caches in a software TLB.
- The CPUs do in their hardware TLBs, but they are handled
- by the normal TLB flushing algorithms. */
-}
-
-#endif /* _X8664_TLBFLUSH_H */
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
deleted file mode 100644
index 36e52fb..0000000
--- a/include/asm-x86_64/topology.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef _ASM_X86_64_TOPOLOGY_H
-#define _ASM_X86_64_TOPOLOGY_H
-
-
-#ifdef CONFIG_NUMA
-
-#include <asm/mpspec.h>
-#include <asm/bitops.h>
-
-extern cpumask_t cpu_online_map;
-
-extern unsigned char cpu_to_node[];
-extern cpumask_t node_to_cpumask[];
-
-#ifdef CONFIG_ACPI_NUMA
-extern int __node_distance(int, int);
-#define node_distance(a,b) __node_distance(a,b)
-/* #else fallback version */
-#endif
-
-#define cpu_to_node(cpu) (cpu_to_node[cpu])
-#define parent_node(node) (node)
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node]))
-#define node_to_cpumask(node) (node_to_cpumask[node])
-#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
-#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
-
-#define numa_node_id() read_pda(nodenumber)
-
-/* sched_domains SD_NODE_INIT for x86_64 machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
- .parent = NULL, \
- .child = NULL, \
- .groups = NULL, \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 2, \
- .busy_idx = 3, \
- .idle_idx = 2, \
- .newidle_idx = 0, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_FORK \
- | SD_BALANCE_EXEC \
- | SD_SERIALIZE \
- | SD_WAKE_BALANCE, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
- .nr_balance_failed = 0, \
-}
-
-#endif
-
-#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
-#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
-#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
-#define smt_capable() (smp_num_siblings > 1)
-#endif
-
-#include <asm-generic/topology.h>
-
-extern cpumask_t cpu_coregroup_map(int cpu);
-
-#endif
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
deleted file mode 100644
index d66ba6e..0000000
--- a/include/asm-x86_64/tsc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-i386/tsc.h>
diff --git a/include/asm-x86_64/types.h b/include/asm-x86_64/types.h
deleted file mode 100644
index 2d4491a..0000000
--- a/include/asm-x86_64/types.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _X86_64_TYPES_H
-#define _X86_64_TYPES_H
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-
-#define BITS_PER_LONG 64
-
-#ifndef __ASSEMBLY__
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
-typedef u64 dma64_addr_t;
-typedef u64 dma_addr_t;
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
deleted file mode 100644
index f4ce876..0000000
--- a/include/asm-x86_64/uaccess.h
+++ /dev/null
@@ -1,384 +0,0 @@
-#ifndef __X86_64_UACCESS_H
-#define __X86_64_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/prefetch.h>
-#include <asm/page.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed; with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
-
-/*
- * Uhhuh, this needs 65-bit arithmetic. We have a carry..
- */
-#define __range_not_ok(addr,size) ({ \
- unsigned long flag,roksum; \
- __chk_user_ptr(addr); \
- asm("# range_ok\n\r" \
- "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
- :"=&r" (flag), "=r" (roksum) \
- :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
- flag; })
-
-#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-#define ARCH_HAS_SEARCH_EXTABLE
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-#define __get_user_x(size,ret,x,ptr) \
- asm volatile("call __get_user_" #size \
- :"=a" (ret),"=d" (x) \
- :"c" (ptr) \
- :"r8")
-
-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
-#define get_user(x,ptr) \
-({ unsigned long __val_gu; \
- int __ret_gu; \
- __chk_user_ptr(ptr); \
- switch(sizeof (*(ptr))) { \
- case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
- case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
- case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
- case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
- default: __get_user_bad(); break; \
- } \
- (x) = (__force typeof(*(ptr)))__val_gu; \
- __ret_gu; \
-})
-
-extern void __put_user_1(void);
-extern void __put_user_2(void);
-extern void __put_user_4(void);
-extern void __put_user_8(void);
-extern void __put_user_bad(void);
-
-#define __put_user_x(size,ret,x,ptr) \
- asm volatile("call __put_user_" #size \
- :"=a" (ret) \
- :"c" (ptr),"d" (x) \
- :"r8")
-
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
- int __pu_err; \
- __put_user_size((x),(ptr),(size),__pu_err); \
- __pu_err; \
-})
-
-
-#define __put_user_check(x,ptr,size) \
-({ \
- int __pu_err; \
- typeof(*(ptr)) __user *__pu_addr = (ptr); \
- switch (size) { \
- case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
- case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
- case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
- case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
- default: __put_user_bad(); \
- } \
- __pu_err; \
-})
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
- case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
- case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
- case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
- default: __put_user_bad(); \
- } \
-} while (0)
-
-/* FIXME: this hack is definitely wrong -AK */
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
- asm volatile( \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous" \
- : "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size) \
-({ \
- int __gu_err; \
- unsigned long __gu_val; \
- __get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (__force typeof(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-extern int __get_user_1(void);
-extern int __get_user_2(void);
-extern int __get_user_4(void);
-extern int __get_user_8(void);
-extern int __get_user_bad(void);
-
-#define __get_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
- case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
- case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
- case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
- default: (x) = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
- asm volatile( \
- "1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous" \
- : "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(errno), "0"(err))
-
-/*
- * Copy To/From Userspace
- */
-
-/* Handles exceptions in both to and from, but doesn't do access_ok */
-__must_check unsigned long
-copy_user_generic(void *to, const void *from, unsigned len);
-
-__must_check unsigned long
-copy_to_user(void __user *to, const void *from, unsigned len);
-__must_check unsigned long
-copy_from_user(void *to, const void __user *from, unsigned len);
-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
- int ret = 0;
- if (!__builtin_constant_p(size))
- return copy_user_generic(dst,(__force void *)src,size);
- switch (size) {
- case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
- return ret;
- case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
- return ret;
- case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
- return ret;
- case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
- return ret;
- case 10:
- __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
- if (unlikely(ret)) return ret;
- __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
- return ret;
- case 16:
- __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
- if (unlikely(ret)) return ret;
- __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
- return ret;
- default:
- return copy_user_generic(dst,(__force void *)src,size);
- }
-}
-
-static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
- int ret = 0;
- if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst,src,size);
- switch (size) {
- case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
- return ret;
- case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
- return ret;
- case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
- return ret;
- case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
- return ret;
- case 10:
- __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
- if (unlikely(ret)) return ret;
- asm("":::"memory");
- __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
- return ret;
- case 16:
- __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
- if (unlikely(ret)) return ret;
- asm("":::"memory");
- __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
- return ret;
- default:
- return copy_user_generic((__force void *)dst,src,size);
- }
-}
-
-static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-{
- int ret = 0;
- if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst,(__force void *)src,size);
- switch (size) {
- case 1: {
- u8 tmp;
- __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
- if (likely(!ret))
- __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
- return ret;
- }
- case 2: {
- u16 tmp;
- __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
- if (likely(!ret))
- __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
- return ret;
- }
-
- case 4: {
- u32 tmp;
- __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
- if (likely(!ret))
- __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
- return ret;
- }
- case 8: {
- u64 tmp;
- __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
- if (likely(!ret))
- __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
- return ret;
- }
- default:
- return copy_user_generic((__force void *)dst,(__force void *)src,size);
- }
-}
-
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
-
-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
-{
- return copy_user_generic((__force void *)dst, src, size);
-}
-
-#define ARCH_HAS_NOCACHE_UACCESS 1
-extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
-
-static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
- might_sleep();
- return __copy_user_nocache(dst, src, size, 1);
-}
-
-static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
-{
- return __copy_user_nocache(dst, src, size, 0);
-}
-
-#endif /* __X86_64_UACCESS_H */
diff --git a/include/asm-x86_64/ucontext.h b/include/asm-x86_64/ucontext.h
deleted file mode 100644
index 159a3da..0000000
--- a/include/asm-x86_64/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASMX8664_UCONTEXT_H
-#define _ASMX8664_UCONTEXT_H
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#endif
diff --git a/include/asm-x86_64/unaligned.h b/include/asm-x86_64/unaligned.h
deleted file mode 100644
index d4bf78d..0000000
--- a/include/asm-x86_64/unaligned.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __X8664_UNALIGNED_H
-#define __X8664_UNALIGNED_H
-
-/*
- * The x86-64 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
- */
-
-/**
- * get_unaligned - get value from possibly mis-aligned location
- * @ptr: pointer to value
- *
- * This macro should be used for accessing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. retrieving a u16 value from a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define get_unaligned(ptr) (*(ptr))
-
-/**
- * put_unaligned - put value to a possibly mis-aligned location
- * @val: value to place
- * @ptr: pointer to location
- *
- * This macro should be used for placing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. writing a u16 value to a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
deleted file mode 100644
index fc4e73f..0000000
--- a/include/asm-x86_64/unistd.h
+++ /dev/null
@@ -1,687 +0,0 @@
-#ifndef _ASM_X86_64_UNISTD_H_
-#define _ASM_X86_64_UNISTD_H_
-
-#ifndef __SYSCALL
-#define __SYSCALL(a,b)
-#endif
-
-/*
- * This file contains the system call numbers.
- *
- * Note: holes are not allowed.
- */
-
-/* at least 8 syscalls per cacheline */
-#define __NR_read 0
-__SYSCALL(__NR_read, sys_read)
-#define __NR_write 1
-__SYSCALL(__NR_write, sys_write)
-#define __NR_open 2
-__SYSCALL(__NR_open, sys_open)
-#define __NR_close 3
-__SYSCALL(__NR_close, sys_close)
-#define __NR_stat 4
-__SYSCALL(__NR_stat, sys_newstat)
-#define __NR_fstat 5
-__SYSCALL(__NR_fstat, sys_newfstat)
-#define __NR_lstat 6
-__SYSCALL(__NR_lstat, sys_newlstat)
-#define __NR_poll 7
-__SYSCALL(__NR_poll, sys_poll)
-
-#define __NR_lseek 8
-__SYSCALL(__NR_lseek, sys_lseek)
-#define __NR_mmap 9
-__SYSCALL(__NR_mmap, sys_mmap)
-#define __NR_mprotect 10
-__SYSCALL(__NR_mprotect, sys_mprotect)
-#define __NR_munmap 11
-__SYSCALL(__NR_munmap, sys_munmap)
-#define __NR_brk 12
-__SYSCALL(__NR_brk, sys_brk)
-#define __NR_rt_sigaction 13
-__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
-#define __NR_rt_sigprocmask 14
-__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
-#define __NR_rt_sigreturn 15
-__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
-
-#define __NR_ioctl 16
-__SYSCALL(__NR_ioctl, sys_ioctl)
-#define __NR_pread64 17
-__SYSCALL(__NR_pread64, sys_pread64)
-#define __NR_pwrite64 18
-__SYSCALL(__NR_pwrite64, sys_pwrite64)
-#define __NR_readv 19
-__SYSCALL(__NR_readv, sys_readv)
-#define __NR_writev 20
-__SYSCALL(__NR_writev, sys_writev)
-#define __NR_access 21
-__SYSCALL(__NR_access, sys_access)
-#define __NR_pipe 22
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_select 23
-__SYSCALL(__NR_select, sys_select)
-
-#define __NR_sched_yield 24
-__SYSCALL(__NR_sched_yield, sys_sched_yield)
-#define __NR_mremap 25
-__SYSCALL(__NR_mremap, sys_mremap)
-#define __NR_msync 26
-__SYSCALL(__NR_msync, sys_msync)
-#define __NR_mincore 27
-__SYSCALL(__NR_mincore, sys_mincore)
-#define __NR_madvise 28
-__SYSCALL(__NR_madvise, sys_madvise)
-#define __NR_shmget 29
-__SYSCALL(__NR_shmget, sys_shmget)
-#define __NR_shmat 30
-__SYSCALL(__NR_shmat, sys_shmat)
-#define __NR_shmctl 31
-__SYSCALL(__NR_shmctl, sys_shmctl)
-
-#define __NR_dup 32
-__SYSCALL(__NR_dup, sys_dup)
-#define __NR_dup2 33
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_pause 34
-__SYSCALL(__NR_pause, sys_pause)
-#define __NR_nanosleep 35
-__SYSCALL(__NR_nanosleep, sys_nanosleep)
-#define __NR_getitimer 36
-__SYSCALL(__NR_getitimer, sys_getitimer)
-#define __NR_alarm 37
-__SYSCALL(__NR_alarm, sys_alarm)
-#define __NR_setitimer 38
-__SYSCALL(__NR_setitimer, sys_setitimer)
-#define __NR_getpid 39
-__SYSCALL(__NR_getpid, sys_getpid)
-
-#define __NR_sendfile 40
-__SYSCALL(__NR_sendfile, sys_sendfile64)
-#define __NR_socket 41
-__SYSCALL(__NR_socket, sys_socket)
-#define __NR_connect 42
-__SYSCALL(__NR_connect, sys_connect)
-#define __NR_accept 43
-__SYSCALL(__NR_accept, sys_accept)
-#define __NR_sendto 44
-__SYSCALL(__NR_sendto, sys_sendto)
-#define __NR_recvfrom 45
-__SYSCALL(__NR_recvfrom, sys_recvfrom)
-#define __NR_sendmsg 46
-__SYSCALL(__NR_sendmsg, sys_sendmsg)
-#define __NR_recvmsg 47
-__SYSCALL(__NR_recvmsg, sys_recvmsg)
-
-#define __NR_shutdown 48
-__SYSCALL(__NR_shutdown, sys_shutdown)
-#define __NR_bind 49
-__SYSCALL(__NR_bind, sys_bind)
-#define __NR_listen 50
-__SYSCALL(__NR_listen, sys_listen)
-#define __NR_getsockname 51
-__SYSCALL(__NR_getsockname, sys_getsockname)
-#define __NR_getpeername 52
-__SYSCALL(__NR_getpeername, sys_getpeername)
-#define __NR_socketpair 53
-__SYSCALL(__NR_socketpair, sys_socketpair)
-#define __NR_setsockopt 54
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
-#define __NR_getsockopt 55
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
-
-#define __NR_clone 56
-__SYSCALL(__NR_clone, stub_clone)
-#define __NR_fork 57
-__SYSCALL(__NR_fork, stub_fork)
-#define __NR_vfork 58
-__SYSCALL(__NR_vfork, stub_vfork)
-#define __NR_execve 59
-__SYSCALL(__NR_execve, stub_execve)
-#define __NR_exit 60
-__SYSCALL(__NR_exit, sys_exit)
-#define __NR_wait4 61
-__SYSCALL(__NR_wait4, sys_wait4)
-#define __NR_kill 62
-__SYSCALL(__NR_kill, sys_kill)
-#define __NR_uname 63
-__SYSCALL(__NR_uname, sys_uname)
-
-#define __NR_semget 64
-__SYSCALL(__NR_semget, sys_semget)
-#define __NR_semop 65
-__SYSCALL(__NR_semop, sys_semop)
-#define __NR_semctl 66
-__SYSCALL(__NR_semctl, sys_semctl)
-#define __NR_shmdt 67
-__SYSCALL(__NR_shmdt, sys_shmdt)
-#define __NR_msgget 68
-__SYSCALL(__NR_msgget, sys_msgget)
-#define __NR_msgsnd 69
-__SYSCALL(__NR_msgsnd, sys_msgsnd)
-#define __NR_msgrcv 70
-__SYSCALL(__NR_msgrcv, sys_msgrcv)
-#define __NR_msgctl 71
-__SYSCALL(__NR_msgctl, sys_msgctl)
-
-#define __NR_fcntl 72
-__SYSCALL(__NR_fcntl, sys_fcntl)
-#define __NR_flock 73
-__SYSCALL(__NR_flock, sys_flock)
-#define __NR_fsync 74
-__SYSCALL(__NR_fsync, sys_fsync)
-#define __NR_fdatasync 75
-__SYSCALL(__NR_fdatasync, sys_fdatasync)
-#define __NR_truncate 76
-__SYSCALL(__NR_truncate, sys_truncate)
-#define __NR_ftruncate 77
-__SYSCALL(__NR_ftruncate, sys_ftruncate)
-#define __NR_getdents 78
-__SYSCALL(__NR_getdents, sys_getdents)
-#define __NR_getcwd 79
-__SYSCALL(__NR_getcwd, sys_getcwd)
-
-#define __NR_chdir 80
-__SYSCALL(__NR_chdir, sys_chdir)
-#define __NR_fchdir 81
-__SYSCALL(__NR_fchdir, sys_fchdir)
-#define __NR_rename 82
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_mkdir 83
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir 84
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_creat 85
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_link 86
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink 87
-__SYSCALL(__NR_unlink, sys_unlink)
-
-#define __NR_symlink 88
-__SYSCALL(__NR_symlink, sys_symlink)
-#define __NR_readlink 89
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_chmod 90
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_fchmod 91
-__SYSCALL(__NR_fchmod, sys_fchmod)
-#define __NR_chown 92
-__SYSCALL(__NR_chown, sys_chown)
-#define __NR_fchown 93
-__SYSCALL(__NR_fchown, sys_fchown)
-#define __NR_lchown 94
-__SYSCALL(__NR_lchown, sys_lchown)
-#define __NR_umask 95
-__SYSCALL(__NR_umask, sys_umask)
-
-#define __NR_gettimeofday 96
-__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
-#define __NR_getrlimit 97
-__SYSCALL(__NR_getrlimit, sys_getrlimit)
-#define __NR_getrusage 98
-__SYSCALL(__NR_getrusage, sys_getrusage)
-#define __NR_sysinfo 99
-__SYSCALL(__NR_sysinfo, sys_sysinfo)
-#define __NR_times 100
-__SYSCALL(__NR_times, sys_times)
-#define __NR_ptrace 101
-__SYSCALL(__NR_ptrace, sys_ptrace)
-#define __NR_getuid 102
-__SYSCALL(__NR_getuid, sys_getuid)
-#define __NR_syslog 103
-__SYSCALL(__NR_syslog, sys_syslog)
-
-/* at the very end, the stuff that never runs during the benchmarks */
-#define __NR_getgid 104
-__SYSCALL(__NR_getgid, sys_getgid)
-#define __NR_setuid 105
-__SYSCALL(__NR_setuid, sys_setuid)
-#define __NR_setgid 106
-__SYSCALL(__NR_setgid, sys_setgid)
-#define __NR_geteuid 107
-__SYSCALL(__NR_geteuid, sys_geteuid)
-#define __NR_getegid 108
-__SYSCALL(__NR_getegid, sys_getegid)
-#define __NR_setpgid 109
-__SYSCALL(__NR_setpgid, sys_setpgid)
-#define __NR_getppid 110
-__SYSCALL(__NR_getppid, sys_getppid)
-#define __NR_getpgrp 111
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-
-#define __NR_setsid 112
-__SYSCALL(__NR_setsid, sys_setsid)
-#define __NR_setreuid 113
-__SYSCALL(__NR_setreuid, sys_setreuid)
-#define __NR_setregid 114
-__SYSCALL(__NR_setregid, sys_setregid)
-#define __NR_getgroups 115
-__SYSCALL(__NR_getgroups, sys_getgroups)
-#define __NR_setgroups 116
-__SYSCALL(__NR_setgroups, sys_setgroups)
-#define __NR_setresuid 117
-__SYSCALL(__NR_setresuid, sys_setresuid)
-#define __NR_getresuid 118
-__SYSCALL(__NR_getresuid, sys_getresuid)
-#define __NR_setresgid 119
-__SYSCALL(__NR_setresgid, sys_setresgid)
-
-#define __NR_getresgid 120
-__SYSCALL(__NR_getresgid, sys_getresgid)
-#define __NR_getpgid 121
-__SYSCALL(__NR_getpgid, sys_getpgid)
-#define __NR_setfsuid 122
-__SYSCALL(__NR_setfsuid, sys_setfsuid)
-#define __NR_setfsgid 123
-__SYSCALL(__NR_setfsgid, sys_setfsgid)
-#define __NR_getsid 124
-__SYSCALL(__NR_getsid, sys_getsid)
-#define __NR_capget 125
-__SYSCALL(__NR_capget, sys_capget)
-#define __NR_capset 126
-__SYSCALL(__NR_capset, sys_capset)
-
-#define __NR_rt_sigpending 127
-__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
-#define __NR_rt_sigtimedwait 128
-__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
-#define __NR_rt_sigqueueinfo 129
-__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
-#define __NR_rt_sigsuspend 130
-__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend)
-#define __NR_sigaltstack 131
-__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
-#define __NR_utime 132
-__SYSCALL(__NR_utime, sys_utime)
-#define __NR_mknod 133
-__SYSCALL(__NR_mknod, sys_mknod)
-
-/* Only needed for a.out */
-#define __NR_uselib 134
-__SYSCALL(__NR_uselib, sys_ni_syscall)
-#define __NR_personality 135
-__SYSCALL(__NR_personality, sys_personality)
-
-#define __NR_ustat 136
-__SYSCALL(__NR_ustat, sys_ustat)
-#define __NR_statfs 137
-__SYSCALL(__NR_statfs, sys_statfs)
-#define __NR_fstatfs 138
-__SYSCALL(__NR_fstatfs, sys_fstatfs)
-#define __NR_sysfs 139
-__SYSCALL(__NR_sysfs, sys_sysfs)
-
-#define __NR_getpriority 140
-__SYSCALL(__NR_getpriority, sys_getpriority)
-#define __NR_setpriority 141
-__SYSCALL(__NR_setpriority, sys_setpriority)
-#define __NR_sched_setparam 142
-__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
-#define __NR_sched_getparam 143
-__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
-#define __NR_sched_setscheduler 144
-__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
-#define __NR_sched_getscheduler 145
-__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
-#define __NR_sched_get_priority_max 146
-__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
-#define __NR_sched_get_priority_min 147
-__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
-#define __NR_sched_rr_get_interval 148
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
-
-#define __NR_mlock 149
-__SYSCALL(__NR_mlock, sys_mlock)
-#define __NR_munlock 150
-__SYSCALL(__NR_munlock, sys_munlock)
-#define __NR_mlockall 151
-__SYSCALL(__NR_mlockall, sys_mlockall)
-#define __NR_munlockall 152
-__SYSCALL(__NR_munlockall, sys_munlockall)
-
-#define __NR_vhangup 153
-__SYSCALL(__NR_vhangup, sys_vhangup)
-
-#define __NR_modify_ldt 154
-__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
-
-#define __NR_pivot_root 155
-__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-#define __NR__sysctl 156
-__SYSCALL(__NR__sysctl, sys_sysctl)
-
-#define __NR_prctl 157
-__SYSCALL(__NR_prctl, sys_prctl)
-#define __NR_arch_prctl 158
-__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
-
-#define __NR_adjtimex 159
-__SYSCALL(__NR_adjtimex, sys_adjtimex)
-
-#define __NR_setrlimit 160
-__SYSCALL(__NR_setrlimit, sys_setrlimit)
-
-#define __NR_chroot 161
-__SYSCALL(__NR_chroot, sys_chroot)
-
-#define __NR_sync 162
-__SYSCALL(__NR_sync, sys_sync)
-
-#define __NR_acct 163
-__SYSCALL(__NR_acct, sys_acct)
-
-#define __NR_settimeofday 164
-__SYSCALL(__NR_settimeofday, sys_settimeofday)
-
-#define __NR_mount 165
-__SYSCALL(__NR_mount, sys_mount)
-#define __NR_umount2 166
-__SYSCALL(__NR_umount2, sys_umount)
-
-#define __NR_swapon 167
-__SYSCALL(__NR_swapon, sys_swapon)
-#define __NR_swapoff 168
-__SYSCALL(__NR_swapoff, sys_swapoff)
-
-#define __NR_reboot 169
-__SYSCALL(__NR_reboot, sys_reboot)
-
-#define __NR_sethostname 170
-__SYSCALL(__NR_sethostname, sys_sethostname)
-#define __NR_setdomainname 171
-__SYSCALL(__NR_setdomainname, sys_setdomainname)
-
-#define __NR_iopl 172
-__SYSCALL(__NR_iopl, stub_iopl)
-#define __NR_ioperm 173
-__SYSCALL(__NR_ioperm, sys_ioperm)
-
-#define __NR_create_module 174
-__SYSCALL(__NR_create_module, sys_ni_syscall)
-#define __NR_init_module 175
-__SYSCALL(__NR_init_module, sys_init_module)
-#define __NR_delete_module 176
-__SYSCALL(__NR_delete_module, sys_delete_module)
-#define __NR_get_kernel_syms 177
-__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
-#define __NR_query_module 178
-__SYSCALL(__NR_query_module, sys_ni_syscall)
-
-#define __NR_quotactl 179
-__SYSCALL(__NR_quotactl, sys_quotactl)
-
-#define __NR_nfsservctl 180
-__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
-
-#define __NR_getpmsg 181 /* reserved for LiS/STREAMS */
-__SYSCALL(__NR_getpmsg, sys_ni_syscall)
-#define __NR_putpmsg 182 /* reserved for LiS/STREAMS */
-__SYSCALL(__NR_putpmsg, sys_ni_syscall)
-
-#define __NR_afs_syscall 183 /* reserved for AFS */
-__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
-
-#define __NR_tuxcall 184 /* reserved for tux */
-__SYSCALL(__NR_tuxcall, sys_ni_syscall)
-
-#define __NR_security 185
-__SYSCALL(__NR_security, sys_ni_syscall)
-
-#define __NR_gettid 186
-__SYSCALL(__NR_gettid, sys_gettid)
-
-#define __NR_readahead 187
-__SYSCALL(__NR_readahead, sys_readahead)
-#define __NR_setxattr 188
-__SYSCALL(__NR_setxattr, sys_setxattr)
-#define __NR_lsetxattr 189
-__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
-#define __NR_fsetxattr 190
-__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
-#define __NR_getxattr 191
-__SYSCALL(__NR_getxattr, sys_getxattr)
-#define __NR_lgetxattr 192
-__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
-#define __NR_fgetxattr 193
-__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
-#define __NR_listxattr 194
-__SYSCALL(__NR_listxattr, sys_listxattr)
-#define __NR_llistxattr 195
-__SYSCALL(__NR_llistxattr, sys_llistxattr)
-#define __NR_flistxattr 196
-__SYSCALL(__NR_flistxattr, sys_flistxattr)
-#define __NR_removexattr 197
-__SYSCALL(__NR_removexattr, sys_removexattr)
-#define __NR_lremovexattr 198
-__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
-#define __NR_fremovexattr 199
-__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-#define __NR_tkill 200
-__SYSCALL(__NR_tkill, sys_tkill)
-#define __NR_time 201
-__SYSCALL(__NR_time, sys_time)
-#define __NR_futex 202
-__SYSCALL(__NR_futex, sys_futex)
-#define __NR_sched_setaffinity 203
-__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
-#define __NR_sched_getaffinity 204
-__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
-#define __NR_set_thread_area 205
-__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
-#define __NR_io_setup 206
-__SYSCALL(__NR_io_setup, sys_io_setup)
-#define __NR_io_destroy 207
-__SYSCALL(__NR_io_destroy, sys_io_destroy)
-#define __NR_io_getevents 208
-__SYSCALL(__NR_io_getevents, sys_io_getevents)
-#define __NR_io_submit 209
-__SYSCALL(__NR_io_submit, sys_io_submit)
-#define __NR_io_cancel 210
-__SYSCALL(__NR_io_cancel, sys_io_cancel)
-#define __NR_get_thread_area 211
-__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
-#define __NR_lookup_dcookie 212
-__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
-#define __NR_epoll_create 213
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_epoll_ctl_old 214
-__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
-#define __NR_epoll_wait_old 215
-__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
-#define __NR_remap_file_pages 216
-__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
-#define __NR_getdents64 217
-__SYSCALL(__NR_getdents64, sys_getdents64)
-#define __NR_set_tid_address 218
-__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
-#define __NR_restart_syscall 219
-__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
-#define __NR_semtimedop 220
-__SYSCALL(__NR_semtimedop, sys_semtimedop)
-#define __NR_fadvise64 221
-__SYSCALL(__NR_fadvise64, sys_fadvise64)
-#define __NR_timer_create 222
-__SYSCALL(__NR_timer_create, sys_timer_create)
-#define __NR_timer_settime 223
-__SYSCALL(__NR_timer_settime, sys_timer_settime)
-#define __NR_timer_gettime 224
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
-#define __NR_timer_getoverrun 225
-__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
-#define __NR_timer_delete 226
-__SYSCALL(__NR_timer_delete, sys_timer_delete)
-#define __NR_clock_settime 227
-__SYSCALL(__NR_clock_settime, sys_clock_settime)
-#define __NR_clock_gettime 228
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
-#define __NR_clock_getres 229
-__SYSCALL(__NR_clock_getres, sys_clock_getres)
-#define __NR_clock_nanosleep 230
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
-#define __NR_exit_group 231
-__SYSCALL(__NR_exit_group, sys_exit_group)
-#define __NR_epoll_wait 232
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_epoll_ctl 233
-__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
-#define __NR_tgkill 234
-__SYSCALL(__NR_tgkill, sys_tgkill)
-#define __NR_utimes 235
-__SYSCALL(__NR_utimes, sys_utimes)
-#define __NR_vserver 236
-__SYSCALL(__NR_vserver, sys_ni_syscall)
-#define __NR_mbind 237
-__SYSCALL(__NR_mbind, sys_mbind)
-#define __NR_set_mempolicy 238
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
-#define __NR_get_mempolicy 239
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
-#define __NR_mq_open 240
-__SYSCALL(__NR_mq_open, sys_mq_open)
-#define __NR_mq_unlink 241
-__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
-#define __NR_mq_timedsend 242
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
-#define __NR_mq_timedreceive 243
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
-#define __NR_mq_notify 244
-__SYSCALL(__NR_mq_notify, sys_mq_notify)
-#define __NR_mq_getsetattr 245
-__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
-#define __NR_kexec_load 246
-__SYSCALL(__NR_kexec_load, sys_kexec_load)
-#define __NR_waitid 247
-__SYSCALL(__NR_waitid, sys_waitid)
-#define __NR_add_key 248
-__SYSCALL(__NR_add_key, sys_add_key)
-#define __NR_request_key 249
-__SYSCALL(__NR_request_key, sys_request_key)
-#define __NR_keyctl 250
-__SYSCALL(__NR_keyctl, sys_keyctl)
-#define __NR_ioprio_set 251
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
-#define __NR_ioprio_get 252
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-#define __NR_inotify_init 253
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_inotify_add_watch 254
-__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
-#define __NR_inotify_rm_watch 255
-__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-#define __NR_migrate_pages 256
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_openat 257
-__SYSCALL(__NR_openat, sys_openat)
-#define __NR_mkdirat 258
-__SYSCALL(__NR_mkdirat, sys_mkdirat)
-#define __NR_mknodat 259
-__SYSCALL(__NR_mknodat, sys_mknodat)
-#define __NR_fchownat 260
-__SYSCALL(__NR_fchownat, sys_fchownat)
-#define __NR_futimesat 261
-__SYSCALL(__NR_futimesat, sys_futimesat)
-#define __NR_newfstatat 262
-__SYSCALL(__NR_newfstatat, sys_newfstatat)
-#define __NR_unlinkat 263
-__SYSCALL(__NR_unlinkat, sys_unlinkat)
-#define __NR_renameat 264
-__SYSCALL(__NR_renameat, sys_renameat)
-#define __NR_linkat 265
-__SYSCALL(__NR_linkat, sys_linkat)
-#define __NR_symlinkat 266
-__SYSCALL(__NR_symlinkat, sys_symlinkat)
-#define __NR_readlinkat 267
-__SYSCALL(__NR_readlinkat, sys_readlinkat)
-#define __NR_fchmodat 268
-__SYSCALL(__NR_fchmodat, sys_fchmodat)
-#define __NR_faccessat 269
-__SYSCALL(__NR_faccessat, sys_faccessat)
-#define __NR_pselect6 270
-__SYSCALL(__NR_pselect6, sys_pselect6)
-#define __NR_ppoll 271
-__SYSCALL(__NR_ppoll, sys_ppoll)
-#define __NR_unshare 272
-__SYSCALL(__NR_unshare, sys_unshare)
-#define __NR_set_robust_list 273
-__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
-#define __NR_get_robust_list 274
-__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
-#define __NR_splice 275
-__SYSCALL(__NR_splice, sys_splice)
-#define __NR_tee 276
-__SYSCALL(__NR_tee, sys_tee)
-#define __NR_sync_file_range 277
-__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
-#define __NR_vmsplice 278
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
-#define __NR_move_pages 279
-__SYSCALL(__NR_move_pages, sys_move_pages)
-#define __NR_utimensat 280
-__SYSCALL(__NR_utimensat, sys_utimensat)
-#define __IGNORE_getcpu /* implemented as a vsyscall */
-#define __NR_epoll_pwait 281
-__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
-#define __NR_signalfd 282
-__SYSCALL(__NR_signalfd, sys_signalfd)
-#define __NR_timerfd 283
-__SYSCALL(__NR_timerfd, sys_timerfd)
-#define __NR_eventfd 284
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_fallocate 285
-__SYSCALL(__NR_fallocate, sys_fallocate)
-
-#ifndef __NO_STUBS
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/ptrace.h>
-
-asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
-struct sigaction;
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* __NO_STUBS */
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-#endif /* _ASM_X86_64_UNISTD_H_ */
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
deleted file mode 100644
index 02710f6..0000000
--- a/include/asm-x86_64/unwind.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_64_UNWIND_H
-#define _ASM_X86_64_UNWIND_H
-
-#define UNW_PC(frame) ((void)(frame), 0UL)
-#define UNW_SP(frame) ((void)(frame), 0UL)
-
-static inline int arch_unw_user_mode(const void *info)
-{
- return 0;
-}
-
-#endif /* _ASM_X86_64_UNWIND_H */
diff --git a/include/asm-x86_64/user.h b/include/asm-x86_64/user.h
deleted file mode 100644
index 12785c6..0000000
--- a/include/asm-x86_64/user.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _X86_64_USER_H
-#define _X86_64_USER_H
-
-#include <asm/types.h>
-#include <asm/page.h>
-/* Core file format: The core file is written in such a way that gdb
- can understand it and provide useful information to the user.
- There are quite a number of obstacles to being able to view the
- contents of the floating point registers, and until these are
- solved you will not be able to view the contents of them.
- Actually, you can read in the core file and look at the contents of
- the user struct to find out what the floating point registers
- contain.
-
- The actual file contents are as follows:
- UPAGE: 1 page consisting of a user struct that tells gdb what is present
- in the file. Directly after this is a copy of the task_struct, which
- is currently not used by gdb, but it may come in useful at some point.
- All of the registers are stored as part of the upage. The upage should
- always be only one page.
- DATA: The data area is stored. We use current->end_text to
- current->brk to pick up all of the user variables, plus any memory
- that may have been malloced. No attempt is made to determine if a page
- is demand-zero or if a page is totally unused, we just cover the entire
- range. All of the addresses are rounded in such a way that an integral
- number of pages is written.
- STACK: We need the stack information in order to get a meaningful
- backtrace. We need to write the data from (esp) to
- current->start_stack, so we round each of these off in order to be able
- to write an integer number of pages.
- The minimum core file size is 3 pages, or 12288 bytes. */
-
-/*
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
- * interacting with the FXSR-format floating point environment. Floating
- * point data can be accessed in the regular format in the usual manner,
- * and both the standard and SIMD floating point data can be accessed via
- * the new ptrace requests. In either case, changes to the FPU environment
- * will be reflected in the task's state as expected.
- *
- * x86-64 support by Andi Kleen.
- */
-
-/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
- as the 32bit format defined by Intel, except that the selector:offset pairs for
- data and eip are replaced with flat 64bit pointers. */
-struct user_i387_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
- unsigned short fop;
- __u64 rip;
- __u64 rdp;
- __u32 mxcsr;
- __u32 mxcsr_mask;
- __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
- __u32 padding[24];
-};
-
-/*
- * Segment register layout in coredumps.
- */
-struct user_regs_struct {
- unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10;
- unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
- unsigned long rip,cs,eflags;
- unsigned long rsp,ss;
- unsigned long fs_base, gs_base;
- unsigned long ds,es,fs,gs;
-};
-
-/* When the kernel dumps core, it starts by dumping the user struct -
- this will be used by gdb to figure out where the data and stack segments
- are within the file, and what virtual addresses to use. */
-struct user{
-/* We start with the registers, to mimic the way that "memory" is returned
- from the ptrace(3,...) function. */
- struct user_regs_struct regs; /* Where the registers are actually stored */
-/* ptrace does not yet supply these. Someday.... */
- int u_fpvalid; /* True if math co-processor being used. */
- /* for this mess. Not yet used. */
- int pad0;
- struct user_i387_struct i387; /* Math Co-processor registers. */
-/* The rest of this junk is to help gdb figure out what goes where */
- unsigned long int u_tsize; /* Text segment size (pages). */
- unsigned long int u_dsize; /* Data segment size (pages). */
- unsigned long int u_ssize; /* Stack segment size (pages). */
- unsigned long start_code; /* Starting virtual address of text. */
- unsigned long start_stack; /* Starting virtual address of stack area.
- This is actually the bottom of the stack,
- the top of the stack is always found in the
- esp register. */
- long int signal; /* Signal that caused the core dump. */
- int reserved; /* No longer used */
- int pad1;
- struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
- /* the registers. */
- struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
- unsigned long magic; /* To uniquely identify a core file */
- char u_comm[32]; /* User command that was responsible */
- unsigned long u_debugreg[8];
- unsigned long error_code; /* CPU error code or 0 */
- unsigned long fault_address; /* CR3 or 0 */
-};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
-#endif /* _X86_64_USER_H */
diff --git a/include/asm-x86_64/user32.h b/include/asm-x86_64/user32.h
deleted file mode 100644
index f769872..0000000
--- a/include/asm-x86_64/user32.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef USER32_H
-#define USER32_H 1
-
-/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */
-
-struct user_i387_ia32_struct {
- u32 cwd;
- u32 swd;
- u32 twd;
- u32 fip;
- u32 fcs;
- u32 foo;
- u32 fos;
- u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-};
-
-/* FSAVE frame with extensions */
-struct user32_fxsr_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd; /* not compatible to 64bit twd */
- unsigned short fop;
- int fip;
- int fcs;
- int foo;
- int fos;
- int mxcsr;
- int reserved;
- int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- int padding[56];
-};
-
-struct user_regs_struct32 {
- __u32 ebx, ecx, edx, esi, edi, ebp, eax;
- unsigned short ds, __ds, es, __es;
- unsigned short fs, __fs, gs, __gs;
- __u32 orig_eax, eip;
- unsigned short cs, __cs;
- __u32 eflags, esp;
- unsigned short ss, __ss;
-};
-
-struct user32 {
- struct user_regs_struct32 regs; /* Where the registers are actually stored */
- int u_fpvalid; /* True if math co-processor being used. */
- /* for this mess. Not yet used. */
- struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
-/* The rest of this junk is to help gdb figure out what goes where */
- __u32 u_tsize; /* Text segment size (pages). */
- __u32 u_dsize; /* Data segment size (pages). */
- __u32 u_ssize; /* Stack segment size (pages). */
- __u32 start_code; /* Starting virtual address of text. */
- __u32 start_stack; /* Starting virtual address of stack area.
- This is actually the bottom of the stack,
- the top of the stack is always found in the
- esp register. */
- __u32 signal; /* Signal that caused the core dump. */
- int reserved; /* No __u32er used */
- __u32 u_ar0; /* Used by gdb to help find the values for */
- /* the registers. */
- __u32 u_fpstate; /* Math Co-processor pointer. */
- __u32 magic; /* To uniquely identify a core file */
- char u_comm[32]; /* User command that was responsible */
- int u_debugreg[8];
-};
-
-
-#endif
diff --git a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
deleted file mode 100644
index 0ecf68a..0000000
--- a/include/asm-x86_64/vga.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@ucw.cz>
- */
-
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-/*
- * On the PC, we can just recalculate addresses and then
- * access the videoram directly without any black magic.
- */
-
-#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
-
-#endif
diff --git a/include/asm-x86_64/vgtod.h b/include/asm-x86_64/vgtod.h
deleted file mode 100644
index 3301f09..0000000
--- a/include/asm-x86_64/vgtod.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_VGTOD_H
-#define _ASM_VGTOD_H 1
-
-#include <asm/vsyscall.h>
-#include <linux/clocksource.h>
-
-struct vsyscall_gtod_data {
- seqlock_t lock;
-
- /* open coded 'struct timespec' */
- time_t wall_time_sec;
- u32 wall_time_nsec;
-
- int sysctl_enabled;
- struct timezone sys_tz;
- struct { /* extract of a clocksource struct */
- cycle_t (*vread)(void);
- cycle_t cycle_last;
- cycle_t mask;
- u32 mult;
- u32 shift;
- } clock;
- struct timespec wall_to_monotonic;
-};
-extern struct vsyscall_gtod_data __vsyscall_gtod_data
-__section_vsyscall_gtod_data;
-extern struct vsyscall_gtod_data vsyscall_gtod_data;
-
-#endif
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
deleted file mode 100644
index 3b8ceb4..0000000
--- a/include/asm-x86_64/vsyscall.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _ASM_X86_64_VSYSCALL_H_
-#define _ASM_X86_64_VSYSCALL_H_
-
-enum vsyscall_num {
- __NR_vgettimeofday,
- __NR_vtime,
- __NR_vgetcpu,
-};
-
-#define VSYSCALL_START (-10UL << 20)
-#define VSYSCALL_SIZE 1024
-#define VSYSCALL_END (-2UL << 20)
-#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
-
-#ifdef __KERNEL__
-#include <linux/seqlock.h>
-
-#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
-
-/* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __section_vsyscall_gtod_data __attribute__ \
- ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
-#define __section_vsyscall_clock __attribute__ \
- ((unused, __section__ (".vsyscall_clock"),aligned(16)))
-#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
-
-#define VGETCPU_RDTSCP 1
-#define VGETCPU_LSL 2
-
-#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
-#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
-
-extern int __vgetcpu_mode;
-extern volatile unsigned long __jiffies;
-
-/* kernel space (writeable) */
-extern int vgetcpu_mode;
-extern struct timezone sys_tz;
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_64_VSYSCALL_H_ */
diff --git a/include/asm-x86_64/vsyscall32.h b/include/asm-x86_64/vsyscall32.h
deleted file mode 100644
index c631c08..0000000
--- a/include/asm-x86_64/vsyscall32.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_VSYSCALL32_H
-#define _ASM_VSYSCALL32_H 1
-
-/* Values need to match arch/x86_64/ia32/vsyscall.lds */
-
-#ifdef __ASSEMBLY__
-#define VSYSCALL32_BASE 0xffffe000
-#define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410)
-#else
-#define VSYSCALL32_BASE 0xffffe000UL
-#define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE)
-#define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE)
-
-#define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400)
-#define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410)
-#define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500)
-#define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600)
-#endif
-
-#endif
diff --git a/include/asm-x86_64/xor.h b/include/asm-x86_64/xor.h
deleted file mode 100644
index f942fcc..0000000
--- a/include/asm-x86_64/xor.h
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * include/asm-x86_64/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-/*
- * Based on
- * High-speed RAID5 checksumming functions utilizing SSE instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-/*
- * x86-64 changes / gcc fixes from Andi Kleen.
- * Copyright 2002 Andi Kleen, SuSE Labs.
- *
- * This hasn't been optimized for the hammer yet, but there are likely
- * no advantages to be gotten from x86-64 here anyways.
- */
-
-typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
-
-/* Doesn't use gcc to save the XMM registers, because there is no easy way to
- tell it to do a clts before the register saving. */
-#define XMMS_SAVE do { \
- preempt_disable(); \
- asm volatile ( \
- "movq %%cr0,%0 ;\n\t" \
- "clts ;\n\t" \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory"); \
-} while(0)
-
-#define XMMS_RESTORE do { \
- asm volatile ( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- "movq %0,%%cr0 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- preempt_enable(); \
-} while(0)
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned int lines = bytes >> 8;
- unsigned long cr0;
- xmm_store_t xmm_save[4];
-
- XMMS_SAVE;
-
- asm volatile (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
- : [inc] "r" (256UL)
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+r" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
- : [inc] "r" (256UL)
- : "memory");
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
- : [inc] "r" (256UL)
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " addq %[inc], %[p5] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
- [p5] "+r" (p5)
- : [inc] "r" (256UL)
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_sse = {
- .name = "generic_sse",
- .do_2 = xor_sse_2,
- .do_3 = xor_sse_3,
- .do_4 = xor_sse_4,
- .do_5 = xor_sse_5,
-};
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_sse); \
- } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)