| author    | Al Viro <viro@zeniv.linux.org.uk>                         | 2008-08-17 19:13:17 -0400 |
|-----------|-----------------------------------------------------------|---------------------------|
| committer | H. Peter Anvin <hpa@zytor.com>                            | 2008-10-22 22:55:19 -0700 |
| commit    | 8ede0bdb63305d3353efd97e9af6210afb05734e (patch)          |                           |
| tree      | a9500a323d0a2dcadca43c23b5c20186f6d9b724 /arch/um/include |                           |
| parent    | 8569c9140bd41089f9b6be8837ca421102714a90 (diff)           |                           |
x86, um: initial part of asm-um move
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/um/include')
144 files changed, 3690 insertions, 0 deletions
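Most of the 144 new files in this move follow one recurring pattern: a thin wrapper header under arch/um/include/asm/ that simply forwards to the underlying host-architecture header via "asm/arch/<name>.h", or to an <asm-generic/...> fallback where one exists. A minimal sketch of that pattern is shown below; "example.h" and the __UM_EXAMPLE_H guard are hypothetical names used for illustration, not files from this patch.

```c
/*
 * Minimal sketch of the wrapper pattern used throughout this move.
 * "example.h" is a hypothetical name; the real wrappers in the diff
 * below forward either to "asm/arch/<name>.h" (the host i386/x86_64
 * header) or to <asm-generic/<name>.h>.
 */
#ifndef __UM_EXAMPLE_H
#define __UM_EXAMPLE_H

#include "asm/arch/example.h"	/* pick up the host architecture's version */

#endif /* __UM_EXAMPLE_H */
```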
diff --git a/arch/um/include/asm/a.out-core.h b/arch/um/include/asm/a.out-core.h new file mode 100644 index 0000000..995643b --- /dev/null +++ b/arch/um/include/asm/a.out-core.h @@ -0,0 +1,27 @@ +/* a.out coredump register dumper + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef __UM_A_OUT_CORE_H +#define __UM_A_OUT_CORE_H + +#ifdef __KERNEL__ + +#include <linux/user.h> + +/* + * fill in the user structure for an a.out core dump + */ +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u) +{ +} + +#endif /* __KERNEL__ */ +#endif /* __UM_A_OUT_CORE_H */ diff --git a/arch/um/include/asm/a.out.h b/arch/um/include/asm/a.out.h new file mode 100644 index 0000000..754181e --- /dev/null +++ b/arch/um/include/asm/a.out.h @@ -0,0 +1,11 @@ +/* + * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_A_OUT_H +#define __UM_A_OUT_H + +#include "asm/arch/a.out.h" + +#endif diff --git a/arch/um/include/asm/alternative-asm.h b/arch/um/include/asm/alternative-asm.h new file mode 100644 index 0000000..9aa9fa2 --- /dev/null +++ b/arch/um/include/asm/alternative-asm.h @@ -0,0 +1,6 @@ +#ifndef __UM_ALTERNATIVE_ASM_I +#define __UM_ALTERNATIVE_ASM_I + +#include "asm/arch/alternative-asm.h" + +#endif diff --git a/arch/um/include/asm/alternative.h b/arch/um/include/asm/alternative.h new file mode 100644 index 0000000..b643439 --- /dev/null +++ b/arch/um/include/asm/alternative.h @@ -0,0 +1,6 @@ +#ifndef __UM_ALTERNATIVE_H +#define __UM_ALTERNATIVE_H + +#include "asm/arch/alternative.h" + +#endif diff --git a/arch/um/include/asm/apic.h b/arch/um/include/asm/apic.h new file mode 100644 index 0000000..876dee8 --- /dev/null +++ b/arch/um/include/asm/apic.h @@ -0,0 +1,4 @@ +#ifndef __UM_APIC_H +#define __UM_APIC_H + +#endif diff --git a/arch/um/include/asm/archparam-i386.h b/arch/um/include/asm/archparam-i386.h new file mode 100644 index 0000000..49e89b8 --- /dev/null +++ b/arch/um/include/asm/archparam-i386.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) + * Licensed under the GPL + */ + +#ifndef __UM_ARCHPARAM_I386_H +#define __UM_ARCHPARAM_I386_H + +/********* Nothing for asm-um/hardirq.h **********/ + +/********* Nothing for asm-um/hw_irq.h **********/ + +/********* Nothing for asm-um/string.h **********/ + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/archparam-ppc.h b/arch/um/include/asm/archparam-ppc.h new file mode 100644 index 0000000..4269d8a --- /dev/null +++ b/arch/um/include/asm/archparam-ppc.h @@ -0,0 +1,8 @@ +#ifndef __UM_ARCHPARAM_PPC_H +#define __UM_ARCHPARAM_PPC_H + +/********* Bits for asm-um/string.h **********/ + +#define __HAVE_ARCH_STRRCHR + +#endif diff --git a/arch/um/include/asm/archparam-x86_64.h b/arch/um/include/asm/archparam-x86_64.h new file mode 100644 index 0000000..270ed95 --- /dev/null +++ b/arch/um/include/asm/archparam-x86_64.h @@ -0,0 +1,26 @@ +/* + * Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_ARCHPARAM_X86_64_H +#define __UM_ARCHPARAM_X86_64_H + + +/* No user-accessible fixmap addresses, i.e. vsyscall */ +#define FIXADDR_USER_START 0 +#define FIXADDR_USER_END 0 + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/asm.h b/arch/um/include/asm/asm.h new file mode 100644 index 0000000..af1269a --- /dev/null +++ b/arch/um/include/asm/asm.h @@ -0,0 +1,6 @@ +#ifndef __UM_ASM_H +#define __UM_ASM_H + +#include "asm/arch/asm.h" + +#endif diff --git a/arch/um/include/asm/atomic.h b/arch/um/include/asm/atomic.h new file mode 100644 index 0000000..b683f10 --- /dev/null +++ b/arch/um/include/asm/atomic.h @@ -0,0 +1,11 @@ +#ifndef __UM_ATOMIC_H +#define __UM_ATOMIC_H + +/* The i386 atomic.h calls printk, but doesn't include kernel.h, so we + * include it here. 
+ */ +#include "linux/kernel.h" + +#include "asm/arch/atomic.h" + +#endif diff --git a/arch/um/include/asm/auxvec.h b/arch/um/include/asm/auxvec.h new file mode 100644 index 0000000..1e5e1c2 --- /dev/null +++ b/arch/um/include/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef __UM_AUXVEC_H +#define __UM_AUXVEC_H + +#endif diff --git a/arch/um/include/asm/bitops.h b/arch/um/include/asm/bitops.h new file mode 100644 index 0000000..e4d38d4 --- /dev/null +++ b/arch/um/include/asm/bitops.h @@ -0,0 +1,10 @@ +#ifndef __UM_BITOPS_H +#define __UM_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include "asm/arch/bitops.h" + +#endif diff --git a/arch/um/include/asm/boot.h b/arch/um/include/asm/boot.h new file mode 100644 index 0000000..09548c3 --- /dev/null +++ b/arch/um/include/asm/boot.h @@ -0,0 +1,6 @@ +#ifndef __UM_BOOT_H +#define __UM_BOOT_H + +#include "asm/arch/boot.h" + +#endif diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h new file mode 100644 index 0000000..9e33b86 --- /dev/null +++ b/arch/um/include/asm/bug.h @@ -0,0 +1,6 @@ +#ifndef __UM_BUG_H +#define __UM_BUG_H + +#include <asm-generic/bug.h> + +#endif diff --git a/arch/um/include/asm/bugs.h b/arch/um/include/asm/bugs.h new file mode 100644 index 0000000..6a72e24 --- /dev/null +++ b/arch/um/include/asm/bugs.h @@ -0,0 +1,6 @@ +#ifndef __UM_BUGS_H +#define __UM_BUGS_H + +void check_bugs(void); + +#endif diff --git a/arch/um/include/asm/byteorder.h b/arch/um/include/asm/byteorder.h new file mode 100644 index 0000000..eee0a83 --- /dev/null +++ b/arch/um/include/asm/byteorder.h @@ -0,0 +1,6 @@ +#ifndef __UM_BYTEORDER_H +#define __UM_BYTEORDER_H + +#include "asm/arch/byteorder.h" + +#endif diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h new file mode 100644 index 0000000..19e1bdd --- /dev/null +++ b/arch/um/include/asm/cache.h @@ -0,0 +1,17 @@ +#ifndef __UM_CACHE_H +#define __UM_CACHE_H + + +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +#elif defined(CONFIG_UML_X86) /* 64-bit */ +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */ +#else +/* XXX: this was taken from x86, now it's completely random. Luckily only + * affects SMP padding. 
*/ +# define L1_CACHE_SHIFT 5 +#endif + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif diff --git a/arch/um/include/asm/cacheflush.h b/arch/um/include/asm/cacheflush.h new file mode 100644 index 0000000..12e9d4b --- /dev/null +++ b/arch/um/include/asm/cacheflush.h @@ -0,0 +1,6 @@ +#ifndef __UM_CACHEFLUSH_H +#define __UM_CACHEFLUSH_H + +#include "asm/arch/cacheflush.h" + +#endif diff --git a/arch/um/include/asm/calling.h b/arch/um/include/asm/calling.h new file mode 100644 index 0000000..0b2384c --- /dev/null +++ b/arch/um/include/asm/calling.h @@ -0,0 +1,9 @@ +# Copyright 2003 - 2004 Pathscale, Inc +# Released under the GPL + +#ifndef __UM_CALLING_H /* XXX x86_64 */ +#define __UM_CALLING_H + +#include "asm/arch/calling.h" + +#endif diff --git a/arch/um/include/asm/checksum.h b/arch/um/include/asm/checksum.h new file mode 100644 index 0000000..5b50136 --- /dev/null +++ b/arch/um/include/asm/checksum.h @@ -0,0 +1,6 @@ +#ifndef __UM_CHECKSUM_H +#define __UM_CHECKSUM_H + +#include "sysdep/checksum.h" + +#endif diff --git a/arch/um/include/asm/cmpxchg.h b/arch/um/include/asm/cmpxchg.h new file mode 100644 index 0000000..529376a --- /dev/null +++ b/arch/um/include/asm/cmpxchg.h @@ -0,0 +1,6 @@ +#ifndef __UM_CMPXCHG_H +#define __UM_CMPXCHG_H + +#include "asm/arch/cmpxchg.h" + +#endif diff --git a/arch/um/include/asm/cobalt.h b/arch/um/include/asm/cobalt.h new file mode 100644 index 0000000..f813a68 --- /dev/null +++ b/arch/um/include/asm/cobalt.h @@ -0,0 +1,6 @@ +#ifndef __UM_COBALT_H +#define __UM_COBALT_H + +#include "asm/arch/cobalt.h" + +#endif diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S new file mode 100644 index 0000000..cb02486 --- /dev/null +++ b/arch/um/include/asm/common.lds.S @@ -0,0 +1,130 @@ +#include <asm-generic/vmlinux.lds.h> + + .fini : { *(.fini) } =0x9090 + _etext = .; + PROVIDE (etext = .); + + . = ALIGN(4096); + _sdata = .; + PROVIDE (sdata = .); + + RODATA + + .unprotected : { *(.unprotected) } + . = ALIGN(4096); + PROVIDE (_unprotected_end = .); + + . = ALIGN(4096); + .note : { *(.note.*) } + __ex_table : { + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } + + BUG_TABLE + + .uml.setup.init : { + __uml_setup_start = .; + *(.uml.setup.init) + __uml_setup_end = .; + } + + .uml.help.init : { + __uml_help_start = .; + *(.uml.help.init) + __uml_help_end = .; + } + + .uml.postsetup.init : { + __uml_postsetup_start = .; + *(.uml.postsetup.init) + __uml_postsetup_end = .; + } + + .init.setup : { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + + . = ALIGN(32); + .data.percpu : { + __per_cpu_start = . ; + *(.data.percpu) + __per_cpu_end = . ; + } + + .initcall.init : { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + + .con_initcall.init : { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + + .uml.initcall.init : { + __uml_initcall_start = .; + *(.uml.initcall.init) + __uml_initcall_end = .; + } + __init_end = .; + + SECURITY_INIT + + .exitcall : { + __exitcall_begin = .; + *(.exitcall.exit) + __exitcall_end = .; + } + + .uml.exitcall : { + __uml_exitcall_begin = .; + *(.uml.exitcall.exit) + __uml_exitcall_end = .; + } + + . 
= ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : { *(.altinstr_replacement) } + /* .exit.text is discard at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ + .exit.text : { *(.exit.text) } + .exit.data : { *(.exit.data) } + + .preinit_array : { + __preinit_array_start = .; + *(.preinit_array) + __preinit_array_end = .; + } + .init_array : { + __init_array_start = .; + *(.init_array) + __init_array_end = .; + } + .fini_array : { + __fini_array_start = .; + *(.fini_array) + __fini_array_end = .; + } + + . = ALIGN(4096); + .init.ramfs : { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } + + /* Sections to be discarded */ + /DISCARD/ : { + *(.exitcall.exit) + } + diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h new file mode 100644 index 0000000..fb7bd42 --- /dev/null +++ b/arch/um/include/asm/cpufeature.h @@ -0,0 +1,6 @@ +#ifndef __UM_CPUFEATURE_H +#define __UM_CPUFEATURE_H + +#include "asm/arch/cpufeature.h" + +#endif diff --git a/arch/um/include/asm/cputime.h b/arch/um/include/asm/cputime.h new file mode 100644 index 0000000..c84acba --- /dev/null +++ b/arch/um/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef __UM_CPUTIME_H +#define __UM_CPUTIME_H + +#include <asm-generic/cputime.h> + +#endif /* __UM_CPUTIME_H */ diff --git a/arch/um/include/asm/current.h b/arch/um/include/asm/current.h new file mode 100644 index 0000000..c2191d9 --- /dev/null +++ b/arch/um/include/asm/current.h @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_CURRENT_H +#define __UM_CURRENT_H + +#include "linux/thread_info.h" + +#define current (current_thread_info()->task) + +#endif diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h new file mode 100644 index 0000000..c71e32b --- /dev/null +++ b/arch/um/include/asm/delay.h @@ -0,0 +1,20 @@ +#ifndef __UM_DELAY_H +#define __UM_DELAY_H + +#define MILLION 1000000 + +/* Undefined on purpose */ +extern void __bad_udelay(void); + +extern void __udelay(unsigned long usecs); +extern void __delay(unsigned long loops); + +#define udelay(n) ((__builtin_constant_p(n) && (n) > 20000) ? \ + __bad_udelay() : __udelay(n)) + +/* It appears that ndelay is not used at all for UML, and has never been + * implemented. */ +extern void __unimplemented_ndelay(void); +#define ndelay(n) __unimplemented_ndelay() + +#endif diff --git a/arch/um/include/asm/desc.h b/arch/um/include/asm/desc.h new file mode 100644 index 0000000..4ec34a5 --- /dev/null +++ b/arch/um/include/asm/desc.h @@ -0,0 +1,16 @@ +#ifndef __UM_DESC_H +#define __UM_DESC_H + +/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't + * compile, and has never been used. 
*/ +#define LDT_empty(info) (\ + (info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0 ) + +#endif diff --git a/arch/um/include/asm/device.h b/arch/um/include/asm/device.h new file mode 100644 index 0000000..d8f9872 --- /dev/null +++ b/arch/um/include/asm/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include <asm-generic/device.h> + diff --git a/arch/um/include/asm/div64.h b/arch/um/include/asm/div64.h new file mode 100644 index 0000000..1e17f74 --- /dev/null +++ b/arch/um/include/asm/div64.h @@ -0,0 +1,6 @@ +#ifndef _UM_DIV64_H +#define _UM_DIV64_H + +#include "asm/arch/div64.h" + +#endif diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h new file mode 100644 index 0000000..90fc708 --- /dev/null +++ b/arch/um/include/asm/dma-mapping.h @@ -0,0 +1,128 @@ +#ifndef _ASM_DMA_MAPPING_H +#define _ASM_DMA_MAPPING_H + +#include <asm/scatterlist.h> + +static inline int +dma_supported(struct device *dev, u64 mask) +{ + BUG(); + return(0); +} + +static inline int +dma_set_mask(struct device *dev, u64 dma_mask) +{ + BUG(); + return(0); +} + +static inline void * +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag) +{ + BUG(); + return((void *) 0); +} + +static inline void +dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) +{ + BUG(); +} + +static inline dma_addr_t +dma_map_single(struct device *dev, void *cpu_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return(0); +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return(0); +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + BUG(); + return(0); +} + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + BUG(); +} + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d, h) (1) + +static inline int +dma_get_cache_alignment(void) +{ + BUG(); + return(0); +} + +static inline void +dma_sync_single_range(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline int +dma_mapping_error(struct device *dev, dma_addr_t dma_handle) +{ + BUG(); + return 0; +} + +#endif diff 
--git a/arch/um/include/asm/dma.h b/arch/um/include/asm/dma.h new file mode 100644 index 0000000..9f6139a --- /dev/null +++ b/arch/um/include/asm/dma.h @@ -0,0 +1,10 @@ +#ifndef __UM_DMA_H +#define __UM_DMA_H + +#include "asm/io.h" + +extern unsigned long uml_physmem; + +#define MAX_DMA_ADDRESS (uml_physmem) + +#endif diff --git a/arch/um/include/asm/dwarf2.h b/arch/um/include/asm/dwarf2.h new file mode 100644 index 0000000..d1a02e7 --- /dev/null +++ b/arch/um/include/asm/dwarf2.h @@ -0,0 +1,11 @@ +/* Copyright 2003 - 2004 Pathscale, Inc + * Released under the GPL + */ + +/* Needed on x86_64 by thunk.S */ +#ifndef __UM_DWARF2_H +#define __UM_DWARF2_H + +#include "asm/arch/dwarf2.h" + +#endif diff --git a/arch/um/include/asm/elf-i386.h b/arch/um/include/asm/elf-i386.h new file mode 100644 index 0000000..d0da9d7 --- /dev/null +++ b/arch/um/include/asm/elf-i386.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ +#ifndef __UM_ELF_I386_H +#define __UM_ELF_I386_H + +#include <asm/user.h> +#include "skas.h" + +#define R_386_NONE 0 +#define R_386_32 1 +#define R_386_PC32 2 +#define R_386_GOT32 3 +#define R_386_PLT32 4 +#define R_386_COPY 5 +#define R_386_GLOB_DAT 6 +#define R_386_JMP_SLOT 7 +#define R_386_RELATIVE 8 +#define R_386_GOTOFF 9 +#define R_386_GOTPC 10 +#define R_386_NUM 11 + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_i387_struct elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) \ + (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) + +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_386 + +#define ELF_PLAT_INIT(regs, load_addr) do { \ + PT_REGS_EBX(regs) = 0; \ + PT_REGS_ECX(regs) = 0; \ + PT_REGS_EDX(regs) = 0; \ + PT_REGS_ESI(regs) = 0; \ + PT_REGS_EDI(regs) = 0; \ + PT_REGS_EBP(regs) = 0; \ + PT_REGS_EAX(regs) = 0; \ +} while (0) + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* Shamelessly stolen from include/asm-i386/elf.h */ + +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = PT_REGS_EBX(regs); \ + pr_reg[1] = PT_REGS_ECX(regs); \ + pr_reg[2] = PT_REGS_EDX(regs); \ + pr_reg[3] = PT_REGS_ESI(regs); \ + pr_reg[4] = PT_REGS_EDI(regs); \ + pr_reg[5] = PT_REGS_EBP(regs); \ + pr_reg[6] = PT_REGS_EAX(regs); \ + pr_reg[7] = PT_REGS_DS(regs); \ + pr_reg[8] = PT_REGS_ES(regs); \ + /* fake once used fs and gs selectors? 
*/ \ + pr_reg[9] = PT_REGS_DS(regs); \ + pr_reg[10] = PT_REGS_DS(regs); \ + pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \ + pr_reg[12] = PT_REGS_IP(regs); \ + pr_reg[13] = PT_REGS_CS(regs); \ + pr_reg[14] = PT_REGS_EFLAGS(regs); \ + pr_reg[15] = PT_REGS_SP(regs); \ + pr_reg[16] = PT_REGS_SS(regs); \ +} while (0); + +extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); + +#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu) + +extern long elf_aux_hwcap; +#define ELF_HWCAP (elf_aux_hwcap) + +extern char * elf_aux_platform; +#define ELF_PLATFORM (elf_aux_platform) + +#define SET_PERSONALITY(ex) do { } while (0) + +extern unsigned long vsyscall_ehdr; +extern unsigned long vsyscall_end; +extern unsigned long __kernel_vsyscall; + +#define VSYSCALL_BASE vsyscall_ehdr +#define VSYSCALL_END vsyscall_end + +/* + * This is the range that is readable by user mode, and things + * acting like user mode such as get_user_pages. + */ +#define FIXADDR_USER_START VSYSCALL_BASE +#define FIXADDR_USER_END VSYSCALL_END + +/* + * Architecture-neutral AT_ values in 0-17, leave some room + * for more of them, start the x86-specific ones at 32. + */ +#define AT_SYSINFO 32 +#define AT_SYSINFO_EHDR 33 + +#define ARCH_DLINFO \ +do { \ + if ( vsyscall_ehdr ) { \ + NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \ + } \ +} while (0) + +/* + * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the vsyscall DSO contents. Dumping its + * contents makes post-mortem fully interpretable later without matching up + * the same kernel and hardware config to see what PC values meant. + * Dumping its extra ELF program headers includes all the other information + * a debugger needs to easily find how the vsyscall DSO was being used. + */ +#define ELF_CORE_EXTRA_PHDRS \ + (vsyscall_ehdr ? 
(((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0 ) + +#define ELF_CORE_WRITE_EXTRA_PHDRS \ +if ( vsyscall_ehdr ) { \ + const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \ + const struct elf_phdr *const phdrp = \ + (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \ + int i; \ + Elf32_Off ofs = 0; \ + for (i = 0; i < ehdrp->e_phnum; ++i) { \ + struct elf_phdr phdr = phdrp[i]; \ + if (phdr.p_type == PT_LOAD) { \ + ofs = phdr.p_offset = offset; \ + offset += phdr.p_filesz; \ + } \ + else \ + phdr.p_offset += ofs; \ + phdr.p_paddr = 0; /* match other core phdrs */ \ + DUMP_WRITE(&phdr, sizeof(phdr)); \ + } \ +} +#define ELF_CORE_WRITE_EXTRA_DATA \ +if ( vsyscall_ehdr ) { \ + const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \ + const struct elf_phdr *const phdrp = \ + (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \ + int i; \ + for (i = 0; i < ehdrp->e_phnum; ++i) { \ + if (phdrp[i].p_type == PT_LOAD) \ + DUMP_WRITE((void *) phdrp[i].p_vaddr, \ + phdrp[i].p_filesz); \ + } \ +} + +#endif diff --git a/arch/um/include/asm/elf-ppc.h b/arch/um/include/asm/elf-ppc.h new file mode 100644 index 0000000..af9463c --- /dev/null +++ b/arch/um/include/asm/elf-ppc.h @@ -0,0 +1,53 @@ +#ifndef __UM_ELF_PPC_H +#define __UM_ELF_PPC_H + + +extern long elf_aux_hwcap; +#define ELF_HWCAP (elf_aux_hwcap) + +#define SET_PERSONALITY(ex) do ; while(0) + +#define ELF_EXEC_PAGESIZE 4096 + +#define elf_check_arch(x) (1) + +#ifdef CONFIG_64BIT +#define ELF_CLASS ELFCLASS64 +#else +#define ELF_CLASS ELFCLASS32 +#endif + +#define USE_ELF_CORE_DUMP + +#define R_386_NONE 0 +#define R_386_32 1 +#define R_386_PC32 2 +#define R_386_GOT32 3 +#define R_386_PLT32 4 +#define R_386_COPY 5 +#define R_386_GLOB_DAT 6 +#define R_386_JMP_SLOT 7 +#define R_386_RELATIVE 8 +#define R_386_GOTOFF 9 +#define R_386_GOTPC 10 +#define R_386_NUM 11 + +#define ELF_PLATFORM (0) + +#define ELF_ET_DYN_BASE (0x08000000) + +/* the following stolen from asm-ppc/elf.h */ +#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */ +#define ELF_NFPREG 33 /* includes fpscr */ +/* General registers */ +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Floating point registers */ +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +#define ELF_DATA ELFDATA2MSB +#define ELF_ARCH EM_PPC + +#endif diff --git a/arch/um/include/asm/elf-x86_64.h b/arch/um/include/asm/elf-x86_64.h new file mode 100644 index 0000000..6e8a919 --- /dev/null +++ b/arch/um/include/asm/elf-x86_64.h @@ -0,0 +1,119 @@ +/* + * Copyright 2003 PathScale, Inc. 
+ * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * + * Licensed under the GPL + */ +#ifndef __UM_ELF_X86_64_H +#define __UM_ELF_X86_64_H + +#include <asm/user.h> +#include "skas.h" + +/* x86-64 relocation types, taken from asm-x86_64/elf.h */ +#define R_X86_64_NONE 0 /* No reloc */ +#define R_X86_64_64 1 /* Direct 64 bit */ +#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ +#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ +#define R_X86_64_PLT32 4 /* 32 bit PLT address */ +#define R_X86_64_COPY 5 /* Copy symbol at runtime */ +#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ +#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ +#define R_X86_64_RELATIVE 8 /* Adjust by program base */ +#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative + offset to GOT */ +#define R_X86_64_32 10 /* Direct 32 bit zero extended */ +#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ +#define R_X86_64_16 12 /* Direct 16 bit zero extended */ +#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ +#define R_X86_64_8 14 /* Direct 8 bit sign extended */ +#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ + +#define R_X86_64_NUM 16 + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_i387_struct elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) \ + ((x)->e_machine == EM_X86_64) + +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_X86_64 + +#define ELF_PLAT_INIT(regs, load_addr) do { \ + PT_REGS_RBX(regs) = 0; \ + PT_REGS_RCX(regs) = 0; \ + PT_REGS_RDX(regs) = 0; \ + PT_REGS_RSI(regs) = 0; \ + PT_REGS_RDI(regs) = 0; \ + PT_REGS_RBP(regs) = 0; \ + PT_REGS_RAX(regs) = 0; \ + PT_REGS_R8(regs) = 0; \ + PT_REGS_R9(regs) = 0; \ + PT_REGS_R10(regs) = 0; \ + PT_REGS_R11(regs) = 0; \ + PT_REGS_R12(regs) = 0; \ + PT_REGS_R13(regs) = 0; \ + PT_REGS_R14(regs) = 0; \ + PT_REGS_R15(regs) = 0; \ +} while (0) + +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ + (pr_reg)[0] = (regs)->regs.gp[0]; \ + (pr_reg)[1] = (regs)->regs.gp[1]; \ + (pr_reg)[2] = (regs)->regs.gp[2]; \ + (pr_reg)[3] = (regs)->regs.gp[3]; \ + (pr_reg)[4] = (regs)->regs.gp[4]; \ + (pr_reg)[5] = (regs)->regs.gp[5]; \ + (pr_reg)[6] = (regs)->regs.gp[6]; \ + (pr_reg)[7] = (regs)->regs.gp[7]; \ + (pr_reg)[8] = (regs)->regs.gp[8]; \ + (pr_reg)[9] = (regs)->regs.gp[9]; \ + (pr_reg)[10] = (regs)->regs.gp[10]; \ + (pr_reg)[11] = (regs)->regs.gp[11]; \ + (pr_reg)[12] = (regs)->regs.gp[12]; \ + (pr_reg)[13] = (regs)->regs.gp[13]; \ + (pr_reg)[14] = (regs)->regs.gp[14]; \ + (pr_reg)[15] = (regs)->regs.gp[15]; \ + (pr_reg)[16] = (regs)->regs.gp[16]; \ + (pr_reg)[17] = (regs)->regs.gp[17]; \ + (pr_reg)[18] = (regs)->regs.gp[18]; \ + (pr_reg)[19] = (regs)->regs.gp[19]; \ + (pr_reg)[20] = (regs)->regs.gp[20]; \ + (pr_reg)[21] = current->thread.arch.fs; \ + (pr_reg)[22] = 0; \ + (pr_reg)[23] = 0; \ + (pr_reg)[24] = 0; \ + (pr_reg)[25] = 0; \ + (pr_reg)[26] = 0; + +extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); + +#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu) + +#ifdef TIF_IA32 /* XXX */ +#error XXX, indeed + clear_thread_flag(TIF_IA32); +#endif + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +extern long elf_aux_hwcap; +#define ELF_HWCAP (elf_aux_hwcap) + +#define 
ELF_PLATFORM "x86_64" + +#define SET_PERSONALITY(ex) do ; while(0) + +#endif diff --git a/arch/um/include/asm/emergency-restart.h b/arch/um/include/asm/emergency-restart.h new file mode 100644 index 0000000..108d8c4 --- /dev/null +++ b/arch/um/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include <asm-generic/emergency-restart.h> + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/um/include/asm/errno.h b/arch/um/include/asm/errno.h new file mode 100644 index 0000000..b7a9e37 --- /dev/null +++ b/arch/um/include/asm/errno.h @@ -0,0 +1,6 @@ +#ifndef __UM_ERRNO_H +#define __UM_ERRNO_H + +#include "asm/arch/errno.h" + +#endif diff --git a/arch/um/include/asm/fcntl.h b/arch/um/include/asm/fcntl.h new file mode 100644 index 0000000..812a654 --- /dev/null +++ b/arch/um/include/asm/fcntl.h @@ -0,0 +1,6 @@ +#ifndef __UM_FCNTL_H +#define __UM_FCNTL_H + +#include "asm/arch/fcntl.h" + +#endif diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h new file mode 100644 index 0000000..9d2be52 --- /dev/null +++ b/arch/um/include/asm/fixmap.h @@ -0,0 +1,98 @@ +#ifndef __UM_FIXMAP_H +#define __UM_FIXMAP_H + +#include <asm/processor.h> +#include <asm/system.h> +#include <asm/kmap_types.h> +#include <asm/archparam.h> +#include <asm/page.h> + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ + +/* + * on UP currently we will have no trace of the fixmap mechanizm, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? + */ +enum fixed_addresses { +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, +#endif + __end_of_fixed_addresses +}; + +extern void __set_fixmap (enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); + +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) +/* + * Some hardware wants to get fixmapped without caching. + */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) +/* + * used by vmalloc.c. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap, and leave one page empty + * at the top of mem.. + */ + +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. 
Illegal ranges of incoming indices are caught too. + */ +static inline unsigned long fix_to_virt(const unsigned int idx) +{ + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ + if (idx >= __end_of_fixed_addresses) + __this_fixmap_does_not_exist(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif diff --git a/arch/um/include/asm/floppy.h b/arch/um/include/asm/floppy.h new file mode 100644 index 0000000..453e741 --- /dev/null +++ b/arch/um/include/asm/floppy.h @@ -0,0 +1,6 @@ +#ifndef __UM_FLOPPY_H +#define __UM_FLOPPY_H + +#include "asm/arch/floppy.h" + +#endif diff --git a/arch/um/include/asm/frame.h b/arch/um/include/asm/frame.h new file mode 100644 index 0000000..8a8c1cb --- /dev/null +++ b/arch/um/include/asm/frame.h @@ -0,0 +1,6 @@ +#ifndef __UM_FRAME_I +#define __UM_FRAME_I + +#include "asm/arch/frame.h" + +#endif diff --git a/arch/um/include/asm/futex.h b/arch/um/include/asm/futex.h new file mode 100644 index 0000000..6a332a9 --- /dev/null +++ b/arch/um/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include <asm-generic/futex.h> + +#endif diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h new file mode 100644 index 0000000..313ebb8 --- /dev/null +++ b/arch/um/include/asm/hardirq.h @@ -0,0 +1,25 @@ +/* (c) 2004 cw@f00f.org, GPLv2 blah blah */ + +#ifndef __ASM_UM_HARDIRQ_H +#define __ASM_UM_HARDIRQ_H + +#include <linux/threads.h> +#include <linux/irq.h> + +/* NOTE: When SMP works again we might want to make this + * ____cacheline_aligned or maybe use per_cpu state? --cw */ +typedef struct { + unsigned int __softirq_pending; +} irq_cpustat_t; + +#include <linux/irq_cpustat.h> + +/* As this would be very strange for UML to get we BUG() after the + * printk. 
*/ +static inline void ack_bad_irq(unsigned int irq) +{ + printk(KERN_ERR "unexpected IRQ %02x\n", irq); + BUG(); +} + +#endif /* __ASM_UM_HARDIRQ_H */ diff --git a/arch/um/include/asm/highmem.h b/arch/um/include/asm/highmem.h new file mode 100644 index 0000000..36974cb --- /dev/null +++ b/arch/um/include/asm/highmem.h @@ -0,0 +1,12 @@ +#ifndef __UM_HIGHMEM_H +#define __UM_HIGHMEM_H + +#include "asm/page.h" +#include "asm/fixmap.h" +#include "asm/arch/highmem.h" + +#undef PKMAP_BASE + +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK) + +#endif diff --git a/arch/um/include/asm/host_ldt-i386.h b/arch/um/include/asm/host_ldt-i386.h new file mode 100644 index 0000000..b27cb0a --- /dev/null +++ b/arch/um/include/asm/host_ldt-i386.h @@ -0,0 +1,34 @@ +#ifndef __ASM_HOST_LDT_I386_H +#define __ASM_HOST_LDT_I386_H + +#include "asm/arch/ldt.h" + +/* + * macros stolen from include/asm-i386/desc.h + */ +#define LDT_entry_a(info) \ + ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) + +#define LDT_entry_b(info) \ + (((info)->base_addr & 0xff000000) | \ + (((info)->base_addr & 0x00ff0000) >> 16) | \ + ((info)->limit & 0xf0000) | \ + (((info)->read_exec_only ^ 1) << 9) | \ + ((info)->contents << 10) | \ + (((info)->seg_not_present ^ 1) << 15) | \ + ((info)->seg_32bit << 22) | \ + ((info)->limit_in_pages << 23) | \ + ((info)->useable << 20) | \ + 0x7000) + +#define LDT_empty(info) (\ + (info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0 ) + +#endif diff --git a/arch/um/include/asm/host_ldt-x86_64.h b/arch/um/include/asm/host_ldt-x86_64.h new file mode 100644 index 0000000..74a63f7 --- /dev/null +++ b/arch/um/include/asm/host_ldt-x86_64.h @@ -0,0 +1,38 @@ +#ifndef __ASM_HOST_LDT_X86_64_H +#define __ASM_HOST_LDT_X86_64_H + +#include "asm/arch/ldt.h" + +/* + * macros stolen from include/asm-x86_64/desc.h + */ +#define LDT_entry_a(info) \ + ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) + +/* Don't allow setting of the lm bit. It is useless anyways because + * 64bit system calls require __USER_CS. 
*/ +#define LDT_entry_b(info) \ + (((info)->base_addr & 0xff000000) | \ + (((info)->base_addr & 0x00ff0000) >> 16) | \ + ((info)->limit & 0xf0000) | \ + (((info)->read_exec_only ^ 1) << 9) | \ + ((info)->contents << 10) | \ + (((info)->seg_not_present ^ 1) << 15) | \ + ((info)->seg_32bit << 22) | \ + ((info)->limit_in_pages << 23) | \ + ((info)->useable << 20) | \ + /* ((info)->lm << 21) | */ \ + 0x7000) + +#define LDT_empty(info) (\ + (info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0 && \ + (info)->lm == 0) + +#endif diff --git a/arch/um/include/asm/hw_irq.h b/arch/um/include/asm/hw_irq.h new file mode 100644 index 0000000..1cf84cf --- /dev/null +++ b/arch/um/include/asm/hw_irq.h @@ -0,0 +1,7 @@ +#ifndef _ASM_UM_HW_IRQ_H +#define _ASM_UM_HW_IRQ_H + +#include "asm/irq.h" +#include "asm/archparam.h" + +#endif diff --git a/arch/um/include/asm/ide.h b/arch/um/include/asm/ide.h new file mode 100644 index 0000000..3d1cceb --- /dev/null +++ b/arch/um/include/asm/ide.h @@ -0,0 +1,6 @@ +#ifndef __UM_IDE_H +#define __UM_IDE_H + +#include "asm/arch/ide.h" + +#endif diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h new file mode 100644 index 0000000..44e8b8c --- /dev/null +++ b/arch/um/include/asm/io.h @@ -0,0 +1,57 @@ +#ifndef __UM_IO_H +#define __UM_IO_H + +#include "asm/page.h" + +#define IO_SPACE_LIMIT 0xdeadbeef /* Sure hope nothing uses this */ + +static inline int inb(unsigned long i) { return(0); } +static inline void outb(char c, unsigned long i) { } + +/* + * Change virtual addresses to physical addresses and vv. + * These are pretty trivial + */ +static inline unsigned long virt_to_phys(volatile void * address) +{ + return __pa((void *) address); +} + +static inline void * phys_to_virt(unsigned long address) +{ + return __va(address); +} + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +static inline void writeb(unsigned char b, volatile void __iomem *addr) +{ + *(volatile unsigned char __force *) addr = b; +} +static inline void writew(unsigned short b, volatile void __iomem *addr) +{ + *(volatile unsigned short __force *) addr = b; +} +static inline void writel(unsigned int b, volatile void __iomem *addr) +{ + *(volatile unsigned int __force *) addr = b; +} +static inline void writeq(unsigned int b, volatile void __iomem *addr) +{ + *(volatile unsigned long long __force *) addr = b; +} +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel +#define __raw_writeq writeq + +#endif diff --git a/arch/um/include/asm/ioctl.h b/arch/um/include/asm/ioctl.h new file mode 100644 index 0000000..cc22157 --- /dev/null +++ b/arch/um/include/asm/ioctl.h @@ -0,0 +1,6 @@ +#ifndef __UM_IOCTL_H +#define __UM_IOCTL_H + +#include "asm/arch/ioctl.h" + +#endif diff --git a/arch/um/include/asm/ioctls.h b/arch/um/include/asm/ioctls.h new file mode 100644 index 0000000..9a1a017d --- /dev/null +++ b/arch/um/include/asm/ioctls.h @@ -0,0 +1,6 @@ +#ifndef __UM_IOCTLS_H +#define __UM_IOCTLS_H + +#include "asm/arch/ioctls.h" + +#endif diff --git a/arch/um/include/asm/ipcbuf.h b/arch/um/include/asm/ipcbuf.h new file mode 100644 index 0000000..bb2ad31 --- /dev/null +++ b/arch/um/include/asm/ipcbuf.h @@ 
-0,0 +1,6 @@ +#ifndef __UM_IPCBUF_H +#define __UM_IPCBUF_H + +#include "asm/arch/ipcbuf.h" + +#endif diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h new file mode 100644 index 0000000..4a2037f --- /dev/null +++ b/arch/um/include/asm/irq.h @@ -0,0 +1,23 @@ +#ifndef __UM_IRQ_H +#define __UM_IRQ_H + +#define TIMER_IRQ 0 +#define UMN_IRQ 1 +#define CONSOLE_IRQ 2 +#define CONSOLE_WRITE_IRQ 3 +#define UBD_IRQ 4 +#define UM_ETH_IRQ 5 +#define SSL_IRQ 6 +#define SSL_WRITE_IRQ 7 +#define ACCEPT_IRQ 8 +#define MCONSOLE_IRQ 9 +#define WINCH_IRQ 10 +#define SIGIO_WRITE_IRQ 11 +#define TELNETD_IRQ 12 +#define XTERM_IRQ 13 +#define RANDOM_IRQ 14 + +#define LAST_IRQ RANDOM_IRQ +#define NR_IRQS (LAST_IRQ + 1) + +#endif diff --git a/arch/um/include/asm/irq_regs.h b/arch/um/include/asm/irq_regs.h new file mode 100644 index 0000000..3dd9c0b --- /dev/null +++ b/arch/um/include/asm/irq_regs.h @@ -0,0 +1 @@ +#include <asm-generic/irq_regs.h> diff --git a/arch/um/include/asm/irq_vectors.h b/arch/um/include/asm/irq_vectors.h new file mode 100644 index 0000000..62ddba6 --- /dev/null +++ b/arch/um/include/asm/irq_vectors.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __UM_IRQ_VECTORS_H +#define __UM_IRQ_VECTORS_H + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h new file mode 100644 index 0000000..659b9ab --- /dev/null +++ b/arch/um/include/asm/irqflags.h @@ -0,0 +1,6 @@ +#ifndef __UM_IRQFLAGS_H +#define __UM_IRQFLAGS_H + +/* Empty for now */ + +#endif diff --git a/arch/um/include/asm/kdebug.h b/arch/um/include/asm/kdebug.h new file mode 100644 index 0000000..6ece1b0 --- /dev/null +++ b/arch/um/include/asm/kdebug.h @@ -0,0 +1 @@ +#include <asm-generic/kdebug.h> diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h new file mode 100644 index 0000000..6c03acd --- /dev/null +++ b/arch/um/include/asm/kmap_types.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __UM_KMAP_TYPES_H +#define __UM_KMAP_TYPES_H + +/* No more #include "asm/arch/kmap_types.h" ! 
*/ + +enum km_type { + KM_BOUNCE_READ, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_TYPE_NR +}; + +#endif diff --git a/arch/um/include/asm/ldt.h b/arch/um/include/asm/ldt.h new file mode 100644 index 0000000..52af512 --- /dev/null +++ b/arch/um/include/asm/ldt.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2004 Fujitsu Siemens Computers GmbH + * Licensed under the GPL + * + * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> + */ + +#ifndef __ASM_LDT_H +#define __ASM_LDT_H + +#include <linux/mutex.h> +#include "asm/host_ldt.h" + +extern void ldt_host_info(void); + +#define LDT_PAGES_MAX \ + ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE) +#define LDT_ENTRIES_PER_PAGE \ + (PAGE_SIZE/LDT_ENTRY_SIZE) +#define LDT_DIRECT_ENTRIES \ + ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE) + +struct ldt_entry { + __u32 a; + __u32 b; +}; + +typedef struct uml_ldt { + int entry_count; + struct mutex lock; + union { + struct ldt_entry * pages[LDT_PAGES_MAX]; + struct ldt_entry entries[LDT_DIRECT_ENTRIES]; + } u; +} uml_ldt_t; + +#endif diff --git a/arch/um/include/asm/linkage.h b/arch/um/include/asm/linkage.h new file mode 100644 index 0000000..7dfce37 --- /dev/null +++ b/arch/um/include/asm/linkage.h @@ -0,0 +1,6 @@ +#ifndef __ASM_UM_LINKAGE_H +#define __ASM_UM_LINKAGE_H + +#include "asm/arch/linkage.h" + +#endif diff --git a/arch/um/include/asm/local.h b/arch/um/include/asm/local.h new file mode 100644 index 0000000..9a280c5 --- /dev/null +++ b/arch/um/include/asm/local.h @@ -0,0 +1,6 @@ +#ifndef __UM_LOCAL_H +#define __UM_LOCAL_H + +#include "asm/arch/local.h" + +#endif diff --git a/arch/um/include/asm/locks.h b/arch/um/include/asm/locks.h new file mode 100644 index 0000000..f80030a --- /dev/null +++ b/arch/um/include/asm/locks.h @@ -0,0 +1,6 @@ +#ifndef __UM_LOCKS_H +#define __UM_LOCKS_H + +#include "asm/arch/locks.h" + +#endif diff --git a/arch/um/include/asm/mca_dma.h b/arch/um/include/asm/mca_dma.h new file mode 100644 index 0000000..e492e4e --- /dev/null +++ b/arch/um/include/asm/mca_dma.h @@ -0,0 +1,6 @@ +#ifndef mca___UM_DMA_H +#define mca___UM_DMA_H + +#include "asm/arch/mca_dma.h" + +#endif diff --git a/arch/um/include/asm/mman.h b/arch/um/include/asm/mman.h new file mode 100644 index 0000000..b09ed52 --- /dev/null +++ b/arch/um/include/asm/mman.h @@ -0,0 +1,6 @@ +#ifndef __UM_MMAN_H +#define __UM_MMAN_H + +#include "asm/arch/mman.h" + +#endif diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h new file mode 100644 index 0000000..2cf35c2 --- /dev/null +++ b/arch/um/include/asm/mmu.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __MMU_H +#define __MMU_H + +#include "um_mmu.h" + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h new file mode 100644 index 0000000..54f42e8 --- /dev/null +++ b/arch/um/include/asm/mmu_context.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_MMU_CONTEXT_H +#define __UM_MMU_CONTEXT_H + +#include "linux/sched.h" +#include "um_mmu.h" + +extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); +extern void arch_exit_mmap(struct mm_struct *mm); + +#define get_mmu_context(task) do ; while(0) +#define activate_context(tsk) do ; while(0) + +#define deactivate_mm(tsk,mm) do { } while (0) + +extern void force_flush_all(void); + +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) +{ + /* + * This is called by fs/exec.c and sys_unshare() + * when the new ->mm is used for the first time. + */ + __switch_mm(&new->context.id); + arch_dup_mmap(old, new); +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned cpu = smp_processor_id(); + + if(prev != next){ + cpu_clear(cpu, prev->cpu_vm_mask); + cpu_set(cpu, next->cpu_vm_mask); + if(next != &init_mm) + __switch_mm(&next->context.id); + } +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +extern int init_new_context(struct task_struct *task, struct mm_struct *mm); + +extern void destroy_context(struct mm_struct *mm); + +#endif diff --git a/arch/um/include/asm/module-generic.h b/arch/um/include/asm/module-generic.h new file mode 100644 index 0000000..5a265f5 --- /dev/null +++ b/arch/um/include/asm/module-generic.h @@ -0,0 +1,6 @@ +#ifndef __UM_MODULE_GENERIC_H +#define __UM_MODULE_GENERIC_H + +#include "asm/arch/module.h" + +#endif diff --git a/arch/um/include/asm/module-i386.h b/arch/um/include/asm/module-i386.h new file mode 100644 index 0000000..5ead4a0 --- /dev/null +++ b/arch/um/include/asm/module-i386.h @@ -0,0 +1,13 @@ +#ifndef __UM_MODULE_I386_H +#define __UM_MODULE_I386_H + +/* UML is simple */ +struct mod_arch_specific +{ +}; + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr + +#endif diff --git a/arch/um/include/asm/module-x86_64.h b/arch/um/include/asm/module-x86_64.h new file mode 100644 index 0000000..35b5491 --- /dev/null +++ b/arch/um/include/asm/module-x86_64.h @@ -0,0 +1,30 @@ +/* + * Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_MODULE_X86_64_H +#define __UM_MODULE_X86_64_H + +/* UML is simple */ +struct mod_arch_specific +{ +}; + +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/msgbuf.h b/arch/um/include/asm/msgbuf.h new file mode 100644 index 0000000..8ce8c30 --- /dev/null +++ b/arch/um/include/asm/msgbuf.h @@ -0,0 +1,6 @@ +#ifndef __UM_MSGBUF_H +#define __UM_MSGBUF_H + +#include "asm/arch/msgbuf.h" + +#endif diff --git a/arch/um/include/asm/mtrr.h b/arch/um/include/asm/mtrr.h new file mode 100644 index 0000000..5e9cd12 --- /dev/null +++ b/arch/um/include/asm/mtrr.h @@ -0,0 +1,6 @@ +#ifndef __UM_MTRR_H +#define __UM_MTRR_H + +#include "asm/arch/mtrr.h" + +#endif diff --git a/arch/um/include/asm/mutex.h b/arch/um/include/asm/mutex.h new file mode 100644 index 0000000..458c1f7 --- /dev/null +++ b/arch/um/include/asm/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include <asm-generic/mutex-dec.h> diff --git a/arch/um/include/asm/nops.h b/arch/um/include/asm/nops.h new file mode 100644 index 0000000..814e9bf --- /dev/null +++ b/arch/um/include/asm/nops.h @@ -0,0 +1,6 @@ +#ifndef __UM_NOPS_H +#define __UM_NOPS_H + +#include "asm/arch/nops.h" + +#endif diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h new file mode 100644 index 0000000..a6df1f1 --- /dev/null +++ b/arch/um/include/asm/page.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) + * Copyright 2003 PathScale, Inc. + * Licensed under the GPL + */ + +#ifndef __UM_PAGE_H +#define __UM_PAGE_H + +#include <linux/const.h> + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifndef __ASSEMBLY__ + +struct page; + +#include <linux/types.h> +#include <asm/vm-flags.h> + +/* + * These are used to make use of C type-checking.. 
+ */ + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT) + +typedef struct { unsigned long pte_low, pte_high; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32)) + +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits)) +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits)) +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits)) +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \ + smp_wmb(); \ + (to).pte_low = (from).pte_low; }) +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high) +#define pte_set_val(pte, phys, prot) \ + ({ (pte).pte_high = (phys) >> 32; \ + (pte).pte_low = (phys) | pgprot_val(prot); }) + +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) + +typedef unsigned long long pfn_t; +typedef unsigned long long phys_t; + +#else + +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pgd; } pgd_t; + +#ifdef CONFIG_3_LEVEL_PGTABLES +typedef struct { unsigned long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) +#endif + +#define pte_val(x) ((x).pte) + + +#define pte_get_bits(p, bits) ((p).pte & (bits)) +#define pte_set_bits(p, bits) ((p).pte |= (bits)) +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits)) +#define pte_copy(to, from) ((to).pte = (from).pte) +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE)) +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot)) + +typedef unsigned long pfn_t; +typedef unsigned long phys_t; + +#endif + +typedef struct { unsigned long pgprot; } pgprot_t; + +typedef struct page *pgtable_t; + +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +extern unsigned long uml_physmem; + +#define PAGE_OFFSET (uml_physmem) +#define KERNELBASE PAGE_OFFSET + +#define __va_space (8*1024*1024) + +#include "mem.h" + +/* Cast to unsigned long before casting to void * to avoid a warning from + * mmap_kmem about cutting a long long down to a void *. 
Not sure that + * casting is the right thing, but 32-bit UML can't have 64-bit virtual + * addresses + */ +#define __pa(virt) to_phys((void *) (unsigned long) (virt)) +#define __va(phys) to_virt((unsigned long) (phys)) + +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT)) +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT)) + +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) + +#include <asm-generic/memory_model.h> +#include <asm-generic/page.h> + +#endif /* __ASSEMBLY__ */ +#endif /* __UM_PAGE_H */ diff --git a/arch/um/include/asm/page_offset.h b/arch/um/include/asm/page_offset.h new file mode 100644 index 0000000..1c168df --- /dev/null +++ b/arch/um/include/asm/page_offset.h @@ -0,0 +1 @@ +#define PAGE_OFFSET_RAW (uml_physmem) diff --git a/arch/um/include/asm/param.h b/arch/um/include/asm/param.h new file mode 100644 index 0000000..e44f4e6 --- /dev/null +++ b/arch/um/include/asm/param.h @@ -0,0 +1,20 @@ +#ifndef _UM_PARAM_H +#define _UM_PARAM_H + +#define EXEC_PAGESIZE 4096 + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#ifdef __KERNEL__ +#define HZ CONFIG_HZ +#define USER_HZ 100 /* .. some user interfaces are in "ticks" */ +#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ +#else +#define HZ 100 +#endif + +#endif diff --git a/arch/um/include/asm/paravirt.h b/arch/um/include/asm/paravirt.h new file mode 100644 index 0000000..9d6aaad --- /dev/null +++ b/arch/um/include/asm/paravirt.h @@ -0,0 +1,6 @@ +#ifndef __UM_PARAVIRT_H +#define __UM_PARAVIRT_H + +#include "asm/arch/paravirt.h" + +#endif diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h new file mode 100644 index 0000000..5992319 --- /dev/null +++ b/arch/um/include/asm/pci.h @@ -0,0 +1,7 @@ +#ifndef __UM_PCI_H +#define __UM_PCI_H + +#define PCI_DMA_BUS_IS_PHYS (1) +#define pcibios_scan_all_fns(a, b) 0 + +#endif diff --git a/arch/um/include/asm/pda.h b/arch/um/include/asm/pda.h new file mode 100644 index 0000000..0d8bf33 --- /dev/null +++ b/arch/um/include/asm/pda.h @@ -0,0 +1,31 @@ +/* + * Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_PDA_X86_64_H +#define __UM_PDA_X86_64_H + +/* XXX */ +struct foo { + unsigned int __softirq_pending; + unsigned int __nmi_count; +}; + +extern struct foo me; + +#define read_pda(me) (&me) + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h new file mode 100644 index 0000000..5723e2a --- /dev/null +++ b/arch/um/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef __UM_PERCPU_H +#define __UM_PERCPU_H + +#include "asm/arch/percpu.h" + +#endif diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h new file mode 100644 index 0000000..9062a6e --- /dev/null +++ b/arch/um/include/asm/pgalloc.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright 2003 PathScale, Inc. 
+ * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h + * Licensed under the GPL + */ + +#ifndef __UM_PGALLOC_H +#define __UM_PGALLOC_H + +#include "linux/mm.h" +#include "asm/fixmap.h" + +#define pmd_populate_kernel(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) + +#define pmd_populate(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_PAGE_TABLE + \ + ((unsigned long long)page_to_pfn(pte) << \ + (unsigned long long) PAGE_SHIFT))) +#define pmd_pgtable(pmd) pmd_page(pmd) + +/* + * Allocate and free page tables. + */ +extern pgd_t *pgd_alloc(struct mm_struct *); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); + +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long) pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_page(pte); +} + +#define __pte_free_tlb(tlb,pte) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb),(pte)); \ +} while (0) + +#ifdef CONFIG_3_LEVEL_PGTABLES + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + free_page((unsigned long)pmd); +} + +#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) +#endif + +#define check_pgt_cache() do { } while (0) + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h new file mode 100644 index 0000000..f534b73 --- /dev/null +++ b/arch/um/include/asm/pgtable-2level.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright 2003 PathScale, Inc. + * Derived from include/asm-i386/pgtable.h + * Licensed under the GPL + */ + +#ifndef __UM_PGTABLE_2LEVEL_H +#define __UM_PGTABLE_2LEVEL_H + +#include <asm-generic/pgtable-nopmd.h> + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ + +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * entries per page directory level: the i386 is two-level, so + * we don't really have any PMD directory physically. 
+ */ +#define PTRS_PER_PTE 1024 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) +#define PTRS_PER_PGD 1024 +#define FIRST_USER_ADDRESS 0 + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \ + pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \ + pgd_val(e)) + +static inline int pgd_newpage(pgd_t pgd) { return 0; } +static inline void pgd_mkuptodate(pgd_t pgd) { } + +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) + +#define pte_pfn(x) phys_to_pfn(pte_val(x)) +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot)) + +/* + * Bits 0 through 4 are taken + */ +#define PTE_FILE_MAX_BITS 27 + +#define pte_to_pgoff(pte) (pte_val(pte) >> 5) + +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE }) + +#endif diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h new file mode 100644 index 0000000..0446f45 --- /dev/null +++ b/arch/um/include/asm/pgtable-3level.h @@ -0,0 +1,146 @@ +/* + * Copyright 2003 PathScale Inc + * Derived from include/asm-i386/pgtable.h + * Licensed under the GPL + */ + +#ifndef __UM_PGTABLE_3LEVEL_H +#define __UM_PGTABLE_3LEVEL_H + +#include <asm-generic/pgtable-nopud.h> + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ + +#ifdef CONFIG_64BIT +#define PGDIR_SHIFT 30 +#else +#define PGDIR_SHIFT 31 +#endif +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* PMD_SHIFT determines the size of the area a second-level page table can + * map + */ + +#define PMD_SHIFT 21 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * entries per page directory level + */ + +#define PTRS_PER_PTE 512 +#ifdef CONFIG_64BIT +#define PTRS_PER_PMD 512 +#define PTRS_PER_PGD 512 +#else +#define PTRS_PER_PMD 1024 +#define PTRS_PER_PGD 1024 +#endif + +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pgd_val(e)) + +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE)) +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT) +#define pud_populate(mm, pud, pmd) \ + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) + +#ifdef CONFIG_64BIT +#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval)) +#else +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval)) +#endif + +static inline int pgd_newpage(pgd_t pgd) +{ + return(pgd_val(pgd) & _PAGE_NEWPAGE); +} + +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; } + +#ifdef CONFIG_64BIT +#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval)) +#else +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) +#endif + +struct mm_struct; +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); + +static inline void pud_clear (pud_t *pud) +{ + set_pud(pud, __pud(_PAGE_NEWPAGE)); +} + +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK) +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & 
PAGE_MASK)) + +/* Find an entry in the second-level page table.. */ +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \ + pmd_index(address)) + +static inline unsigned long pte_pfn(pte_t pte) +{ + return phys_to_pfn(pte_val(pte)); +} + +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot) +{ + pte_t pte; + phys_t phys = pfn_to_phys(page_nr); + + pte_set_val(pte, phys, pgprot); + return pte; +} + +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot) +{ + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)); +} + +/* + * Bits 0 through 3 are taken in the low part of the pte, + * put the 32 bits of offset into the high part. + */ +#define PTE_FILE_MAX_BITS 32 + +#ifdef CONFIG_64BIT + +#define pte_to_pgoff(p) ((p).pte >> 32) + +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE }) + +#else + +#define pte_to_pgoff(pte) ((pte).pte_high) + +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) + +#endif + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h new file mode 100644 index 0000000..02db81b --- /dev/null +++ b/arch/um/include/asm/pgtable.h @@ -0,0 +1,358 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright 2003 PathScale, Inc. + * Derived from include/asm-i386/pgtable.h + * Licensed under the GPL + */ + +#ifndef __UM_PGTABLE_H +#define __UM_PGTABLE_H + +#include <asm/fixmap.h> + +#define _PAGE_PRESENT 0x001 +#define _PAGE_NEWPAGE 0x002 +#define _PAGE_NEWPROT 0x004 +#define _PAGE_RW 0x020 +#define _PAGE_USER 0x040 +#define _PAGE_ACCESSED 0x080 +#define _PAGE_DIRTY 0x100 +/* If _PAGE_PRESENT is clear, we use these: */ +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */ +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE; + pte_present gives true */ + +#ifdef CONFIG_3_LEVEL_PGTABLES +#include "asm/pgtable-3level.h" +#else +#include "asm/pgtable-2level.h" +#endif + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* zero page used for uninitialized stuff */ +extern unsigned long *empty_zero_page; + +#define pgtable_cache_init() do ; while (0) + +/* Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be a 8MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. 
;) + */ + +extern unsigned long end_iomem; + +#define VMALLOC_OFFSET (__va_space) +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) +#ifdef CONFIG_HIGHMEM +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) +#else +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +#endif + +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) + +/* + * The i386 can't do page protection for execute, and considers that the same + * are read. + * Also, write permissions imply read permissions. This is the closest we can + * get.. + */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) + +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) + +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE)) +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) + +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0) + +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE) +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE) + +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE) +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE) + +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK) + +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE)) + +/* + * ================================= + * Flags checking section. + * ================================= + */ + +static inline int pte_none(pte_t pte) +{ + return pte_is_zero(pte); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_read(pte_t pte) +{ + return((pte_get_bits(pte, _PAGE_USER)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +static inline int pte_exec(pte_t pte){ + return((pte_get_bits(pte, _PAGE_USER)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +static inline int pte_write(pte_t pte) +{ + return((pte_get_bits(pte, _PAGE_RW)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +/* + * The following only works if pte_present() is not true. 
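The access checks above follow one pattern: test a _PAGE_* bit and, for the user-visible ones, also require that _PAGE_PROTNONE is clear, while pte_present() deliberately accepts PROT_NONE mappings. A standalone sketch using the same bit values, separate from the patch (the demo_* helpers and the sample pte value are illustrative only):

```c
#include <stdio.h>

/* bit values as defined in the asm/pgtable.h hunk above */
#define _PAGE_PRESENT  0x001
#define _PAGE_PROTNONE 0x010
#define _PAGE_RW       0x020
#define _PAGE_USER     0x040

/* same logic as pte_present(): PROT_NONE pages still count as present */
static int demo_pte_present(unsigned long pte)
{
	return (pte & (_PAGE_PRESENT | _PAGE_PROTNONE)) != 0;
}

/* same logic as pte_write(): writable only if RW is set and PROTNONE is not */
static int demo_pte_write(unsigned long pte)
{
	return (pte & _PAGE_RW) && !(pte & _PAGE_PROTNONE);
}

int main(void)
{
	unsigned long pte = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;

	printf("present=%d write=%d\n", demo_pte_present(pte), demo_pte_write(pte));

	pte |= _PAGE_PROTNONE;   /* as after mprotect(PROT_NONE): present, not writable */
	printf("present=%d write=%d\n", demo_pte_present(pte), demo_pte_write(pte));
	return 0;
}
```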
+ */ +static inline int pte_file(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_FILE); +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_ACCESSED); +} + +static inline int pte_newpage(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_NEWPAGE); +} + +static inline int pte_newprot(pte_t pte) +{ + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT))); +} + +static inline int pte_special(pte_t pte) +{ + return 0; +} + +/* + * ================================= + * Flags setting section. + * ================================= + */ + +static inline pte_t pte_mknewprot(pte_t pte) +{ + pte_set_bits(pte, _PAGE_NEWPROT); + return(pte); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_DIRTY); + return(pte); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_ACCESSED); + return(pte); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_RW); + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkread(pte_t pte) +{ + pte_set_bits(pte, _PAGE_USER); + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_set_bits(pte, _PAGE_DIRTY); + return(pte); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_set_bits(pte, _PAGE_ACCESSED); + return(pte); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_set_bits(pte, _PAGE_RW); + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkuptodate(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_NEWPAGE); + if(pte_present(pte)) + pte_clear_bits(pte, _PAGE_NEWPROT); + return(pte); +} + +static inline pte_t pte_mknewpage(pte_t pte) +{ + pte_set_bits(pte, _PAGE_NEWPAGE); + return(pte); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return(pte); +} + +static inline void set_pte(pte_t *pteptr, pte_t pteval) +{ + pte_copy(*pteptr, pteval); + + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to + * mapped pages. + */ + + *pteptr = pte_mknewpage(*pteptr); + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); +} +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
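set_pte() above tags every installed entry with _PAGE_NEWPAGE (and _PAGE_NEWPROT for present pages) so the code that syncs host mappings can find entries that still need attention; pte_mkuptodate() clears the tags once that sync has happened. A standalone toy model of that lifecycle, not part of the patch (only the relevant bits are modelled and the demo_* names are made up):

```c
#include <stdio.h>

/* bit values as defined in the asm/pgtable.h hunk above */
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
#define _PAGE_NEWPROT 0x004

/* roughly what set_pte() does to the flag bits */
static unsigned long demo_set_pte(unsigned long val)
{
	val |= _PAGE_NEWPAGE;              /* entry always needs (re)mapping */
	if (val & _PAGE_PRESENT)
		val |= _PAGE_NEWPROT;      /* protection must be pushed too */
	return val;
}

/* roughly what pte_mkuptodate() does once the host mapping is in sync */
static unsigned long demo_mkuptodate(unsigned long val)
{
	val &= ~_PAGE_NEWPAGE;
	if (val & _PAGE_PRESENT)
		val &= ~_PAGE_NEWPROT;
	return val;
}

int main(void)
{
	unsigned long pte = demo_set_pte(_PAGE_PRESENT);

	printf("after set_pte:    0x%lx\n", pte);
	printf("after mkuptodate: 0x%lx\n", demo_mkuptodate(pte));
	return 0;
}
```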
+ */ + +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) +#define __virt_to_page(virt) phys_to_page(__pa(virt)) +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page)) +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr) + +#define mk_pte(page, pgprot) \ + ({ pte_t pte; \ + \ + pte_set_val(pte, page_to_phys(page), (pgprot)); \ + if (pte_present(pte)) \ + pte_mknewprot(pte_mknewpage(pte)); \ + pte;}) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot); + return pte; +} + +/* + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] + * + * this macro returns the index of the entry in the pgd page which would + * control the given virtual address + */ +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* + * pgd_offset() returns a (pgd_t *) + * pgd_index() is used get the offset into the pgd page's array of pgd_t's; + */ +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) + +/* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's + */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] + * + * this macro returns the index of the entry in the pmd page which would + * control the given virtual address + */ +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +#define pmd_page_vaddr(pmd) \ + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + +/* + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] + * + * this macro returns the index of the entry in the pte page which would + * control the given virtual address + */ +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) +#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + +struct mm_struct; +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); + +#define update_mmu_cache(vma,address,pte) do ; while (0) + +/* Encode and de-code a swap entry */ +#define __swp_type(x) (((x).val >> 4) & 0x3f) +#define __swp_offset(x) ((x).val >> 11) + +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << 4) | ((offset) << 11) }) +#define __pte_to_swp_entry(pte) \ + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define kern_addr_valid(addr) (1) + +#include <asm-generic/pgtable.h> + +#endif diff --git a/arch/um/include/asm/poll.h b/arch/um/include/asm/poll.h new file mode 100644 index 0000000..1eb4e1b --- /dev/null +++ b/arch/um/include/asm/poll.h @@ -0,0 +1,6 @@ +#ifndef __UM_POLL_H +#define __UM_POLL_H + +#include "asm/arch/poll.h" + +#endif diff --git a/arch/um/include/asm/posix_types.h b/arch/um/include/asm/posix_types.h new file mode 100644 index 0000000..32fb419 --- /dev/null +++ b/arch/um/include/asm/posix_types.h @@ -0,0 +1,6 @@ +#ifndef __UM_POSIX_TYPES_H +#define __UM_POSIX_TYPES_H + +#include "asm/arch/posix_types.h" + +#endif diff --git a/arch/um/include/asm/prctl.h b/arch/um/include/asm/prctl.h new file mode 100644 index 
0000000..64b6d09 --- /dev/null +++ b/arch/um/include/asm/prctl.h @@ -0,0 +1,6 @@ +#ifndef __UM_PRCTL_H +#define __UM_PRCTL_H + +#include "asm/arch/prctl.h" + +#endif diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h new file mode 100644 index 0000000..bed6688 --- /dev/null +++ b/arch/um/include/asm/processor-generic.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_PROCESSOR_GENERIC_H +#define __UM_PROCESSOR_GENERIC_H + +struct pt_regs; + +struct task_struct; + +#include "asm/ptrace.h" +#include "registers.h" +#include "sysdep/archsetjmp.h" + +struct mm_struct; + +struct thread_struct { + struct task_struct *saved_task; + /* + * This flag is set to 1 before calling do_fork (and analyzed in + * copy_thread) to mark that we are begin called from userspace (fork / + * vfork / clone), and reset to 0 after. It is left to 0 when called + * from kernelspace (i.e. kernel_thread() or fork_idle(), + * as of 2.6.11). + */ + int forking; + struct pt_regs regs; + int singlestep_syscall; + void *fault_addr; + jmp_buf *fault_catcher; + struct task_struct *prev_sched; + unsigned long temp_stack; + jmp_buf *exec_buf; + struct arch_thread arch; + jmp_buf switch_buf; + int mm_count; + struct { + int op; + union { + struct { + int pid; + } fork, exec; + struct { + int (*proc)(void *); + void *arg; + } thread; + struct { + void (*proc)(void *); + void *arg; + } cb; + } u; + } request; +}; + +#define INIT_THREAD \ +{ \ + .forking = 0, \ + .regs = EMPTY_REGS, \ + .fault_addr = NULL, \ + .prev_sched = NULL, \ + .temp_stack = 0, \ + .exec_buf = NULL, \ + .arch = INIT_ARCH_THREAD, \ + .request = { 0 } \ +} + +extern struct task_struct *alloc_task_struct(void); + +static inline void release_thread(struct task_struct *task) +{ +} + +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +static inline void prepare_to_copy(struct task_struct *tsk) +{ +} + + +extern unsigned long thread_saved_pc(struct task_struct *t); + +static inline void mm_copy_segments(struct mm_struct *from_mm, + struct mm_struct *new_mm) +{ +} + +#define init_stack (init_thread_union.stack) + +/* + * User space process size: 3GB (default). + */ +extern unsigned long task_size; + +#define TASK_SIZE (task_size) + +#undef STACK_TOP +#undef STACK_TOP_MAX + +extern unsigned long stacksizelim; + +#define STACK_ROOM (stacksizelim) +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE) +#define STACK_TOP_MAX STACK_TOP + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
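processor-generic.h derives the layout from a runtime task_size: the stack top sits two guard pages below TASK_SIZE, and mmap searches start at the fixed TASK_UNMAPPED_BASE defined right after this comment. A standalone illustration of those relationships, separate from the patch (the 3 GB task_size and 4 KiB page size are assumptions matching the defaults mentioned above):

```c
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define TASK_SIZE          0xc0000000UL   /* "3GB (default)" per the comment above */
#define TASK_UNMAPPED_BASE 0x40000000UL
#define STACK_TOP          (TASK_SIZE - 2 * PAGE_SIZE)

int main(void)
{
	printf("user space: 0x0 .. 0x%lx\n", TASK_SIZE);
	printf("mmap base:  0x%lx\n", TASK_UNMAPPED_BASE);
	printf("stack top:  0x%lx (two pages below TASK_SIZE)\n", STACK_TOP);
	return 0;
}
```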
+ */ +#define TASK_UNMAPPED_BASE (0x40000000) + +extern void start_thread(struct pt_regs *regs, unsigned long entry, + unsigned long stack); + +struct cpuinfo_um { + unsigned long loops_per_jiffy; + int ipi_pipe[2]; +}; + +extern struct cpuinfo_um boot_cpu_data; + +#define my_cpu_data cpu_data[smp_processor_id()] + +#ifdef CONFIG_SMP +extern struct cpuinfo_um cpu_data[]; +#define current_cpu_data cpu_data[smp_processor_id()] +#else +#define cpu_data (&boot_cpu_data) +#define current_cpu_data boot_cpu_data +#endif + + +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf) +extern unsigned long get_wchan(struct task_struct *p); + +#endif diff --git a/arch/um/include/asm/processor-i386.h b/arch/um/include/asm/processor-i386.h new file mode 100644 index 0000000..a2b7fe1 --- /dev/null +++ b/arch/um/include/asm/processor-i386.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __UM_PROCESSOR_I386_H +#define __UM_PROCESSOR_I386_H + +#include "linux/string.h" +#include "asm/host_ldt.h" +#include "asm/segment.h" + +extern int host_has_cmov; + +/* include faultinfo structure */ +#include "sysdep/faultinfo.h" + +struct uml_tls_struct { + struct user_desc tls; + unsigned flushed:1; + unsigned present:1; +}; + +struct arch_thread { + struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + unsigned long debugregs[8]; + int debugregs_seq; + struct faultinfo faultinfo; +}; + +#define INIT_ARCH_THREAD { \ + .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \ + { .present = 0, .flushed = 0 } }, \ + .debugregs = { [ 0 ... 7 ] = 0 }, \ + .debugregs_seq = 0, \ + .faultinfo = { 0, 0, 0 } \ +} + +static inline void arch_flush_thread(struct arch_thread *thread) +{ + /* Clear any TLS still hanging */ + memset(&thread->tls_array, 0, sizeof(thread->tls_array)); +} + +static inline void arch_copy_thread(struct arch_thread *from, + struct arch_thread *to) +{ + memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array)); +} + +#include "asm/arch/user.h" + +/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ +static inline void rep_nop(void) +{ + __asm__ __volatile__("rep;nop": : :"memory"); +} + +#define cpu_relax() rep_nop() + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). Stolen + * from asm-i386/processor.h + */ +#define current_text_addr() \ + ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) + +#define ARCH_IS_STACKGROW(address) \ + (address + 32 >= UPT_SP(&current->thread.regs.regs)) + +#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP) +#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP) +#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP) + +#include "asm/processor-generic.h" + +#endif diff --git a/arch/um/include/asm/processor-ppc.h b/arch/um/include/asm/processor-ppc.h new file mode 100644 index 0000000..9593231 --- /dev/null +++ b/arch/um/include/asm/processor-ppc.h @@ -0,0 +1,15 @@ +#ifndef __UM_PROCESSOR_PPC_H +#define __UM_PROCESSOR_PPC_H + +#if defined(__ASSEMBLY__) + +#define CONFIG_PPC_MULTIPLATFORM +#include "arch/processor.h" + +#else + +#include "asm/processor-generic.h" + +#endif + +#endif diff --git a/arch/um/include/asm/processor-x86_64.h b/arch/um/include/asm/processor-x86_64.h new file mode 100644 index 0000000..e509331 --- /dev/null +++ b/arch/um/include/asm/processor-x86_64.h @@ -0,0 +1,56 @@ +/* + * Copyright 2003 PathScale, Inc.
+ * + * Licensed under the GPL + */ + +#ifndef __UM_PROCESSOR_X86_64_H +#define __UM_PROCESSOR_X86_64_H + +/* include faultinfo structure */ +#include "sysdep/faultinfo.h" + +struct arch_thread { + unsigned long debugregs[8]; + int debugregs_seq; + unsigned long fs; + struct faultinfo faultinfo; +}; + +/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ +static inline void rep_nop(void) +{ + __asm__ __volatile__("rep;nop": : :"memory"); +} + +#define cpu_relax() rep_nop() + +#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \ + .debugregs_seq = 0, \ + .fs = 0, \ + .faultinfo = { 0, 0, 0 } } + +static inline void arch_flush_thread(struct arch_thread *thread) +{ +} + +static inline void arch_copy_thread(struct arch_thread *from, + struct arch_thread *to) +{ + to->fs = from->fs; +} + +#include "asm/arch/user.h" + +#define current_text_addr() \ + ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; }) + +#define ARCH_IS_STACKGROW(address) \ + (address + 128 >= UPT_SP(&current->thread.regs.regs)) + +#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP) +#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP) + +#include "asm/processor-generic.h" + +#endif diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h new file mode 100644 index 0000000..3157497 --- /dev/null +++ b/arch/um/include/asm/ptrace-generic.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_PTRACE_GENERIC_H +#define __UM_PTRACE_GENERIC_H + +#ifndef __ASSEMBLY__ + +#include "asm/arch/ptrace-abi.h" +#include <asm/user.h> +#include "sysdep/ptrace.h" + +struct pt_regs { + struct uml_pt_regs regs; +}; + +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS } + +#define PT_REGS_IP(r) UPT_IP(&(r)->regs) +#define PT_REGS_SP(r) UPT_SP(&(r)->regs) + +#define PT_REG(r, reg) UPT_REG(&(r)->regs, reg) +#define PT_REGS_SET(r, reg, val) UPT_SET(&(r)->regs, reg, val) + +#define PT_REGS_SET_SYSCALL_RETURN(r, res) \ + UPT_SET_SYSCALL_RETURN(&(r)->regs, res) +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs) + +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs) + +#define PT_REGS_SC(r) UPT_SC(&(r)->regs) + +#define instruction_pointer(regs) PT_REGS_IP(regs) + +struct task_struct; + +extern long subarch_ptrace(struct task_struct *child, long request, long addr, + long data); +extern unsigned long getreg(struct task_struct *child, int regno); +extern int putreg(struct task_struct *child, int regno, unsigned long value); +extern int get_fpregs(struct user_i387_struct __user *buf, + struct task_struct *child); +extern int set_fpregs(struct user_i387_struct __user *buf, + struct task_struct *child); + +extern void show_regs(struct pt_regs *regs); + +extern int arch_copy_tls(struct task_struct *new); +extern void clear_flushed_tls(struct task_struct *task); + +#endif + +#endif diff --git a/arch/um/include/asm/ptrace-i386.h b/arch/um/include/asm/ptrace-i386.h new file mode 100644 index 0000000..b2d24c5e --- /dev/null +++ b/arch/um/include/asm/ptrace-i386.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_PTRACE_I386_H +#define __UM_PTRACE_I386_H + +#define HOST_AUDIT_ARCH AUDIT_ARCH_I386 + +#include "linux/compiler.h" +#include "asm/ptrace-generic.h" +#include <asm/user.h> +#include "sysdep/ptrace.h" + +#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) +#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) +#define PT_REGS_ECX(r)
UPT_ECX(&(r)->regs) +#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs) +#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs) +#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs) +#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs) + +#define PT_REGS_CS(r) UPT_CS(&(r)->regs) +#define PT_REGS_SS(r) UPT_SS(&(r)->regs) +#define PT_REGS_DS(r) UPT_DS(&(r)->regs) +#define PT_REGS_ES(r) UPT_ES(&(r)->regs) +#define PT_REGS_FS(r) UPT_FS(&(r)->regs) +#define PT_REGS_GS(r) UPT_GS(&(r)->regs) + +#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs) + +#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r) +#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r) +#define PT_FIX_EXEC_STACK(sp) do ; while(0) + +/* Cope with a conditional i386 definition. */ +#undef profile_pc +#define profile_pc(regs) PT_REGS_IP(regs) + +#define user_mode(r) UPT_IS_USER(&(r)->regs) + +/* + * Forward declaration to avoid including sysdep/tls.h, which causes a + * circular include, and compilation failures. + */ +struct user_desc; + +extern int get_fpxregs(struct user_fxsr_struct __user *buf, + struct task_struct *child); +extern int set_fpxregs(struct user_fxsr_struct __user *buf, + struct task_struct *tsk); + +extern int ptrace_get_thread_area(struct task_struct *child, int idx, + struct user_desc __user *user_desc); + +extern int ptrace_set_thread_area(struct task_struct *child, int idx, + struct user_desc __user *user_desc); + +#endif diff --git a/arch/um/include/asm/ptrace-x86_64.h b/arch/um/include/asm/ptrace-x86_64.h new file mode 100644 index 0000000..4c47535 --- /dev/null +++ b/arch/um/include/asm/ptrace-x86_64.h @@ -0,0 +1,81 @@ +/* + * Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_PTRACE_X86_64_H +#define __UM_PTRACE_X86_64_H + +#include "linux/compiler.h" +#include "asm/errno.h" +#include "asm/host_ldt.h" + +#define __FRAME_OFFSETS /* Needed to get the R* macros */ +#include "asm/ptrace-generic.h" + +#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64 + +/* Also defined in sysdep/ptrace.h, so may already be defined. 
*/ +#ifndef FS_BASE +#define FS_BASE (21 * sizeof(unsigned long)) +#define GS_BASE (22 * sizeof(unsigned long)) +#define DS (23 * sizeof(unsigned long)) +#define ES (24 * sizeof(unsigned long)) +#define FS (25 * sizeof(unsigned long)) +#define GS (26 * sizeof(unsigned long)) +#endif + +#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs) +#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs) +#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs) +#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs) +#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs) +#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs) +#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs) +#define PT_REGS_R8(r) UPT_R8(&(r)->regs) +#define PT_REGS_R9(r) UPT_R9(&(r)->regs) +#define PT_REGS_R10(r) UPT_R10(&(r)->regs) +#define PT_REGS_R11(r) UPT_R11(&(r)->regs) +#define PT_REGS_R12(r) UPT_R12(&(r)->regs) +#define PT_REGS_R13(r) UPT_R13(&(r)->regs) +#define PT_REGS_R14(r) UPT_R14(&(r)->regs) +#define PT_REGS_R15(r) UPT_R15(&(r)->regs) + +#define PT_REGS_FS(r) UPT_FS(&(r)->regs) +#define PT_REGS_GS(r) UPT_GS(&(r)->regs) +#define PT_REGS_DS(r) UPT_DS(&(r)->regs) +#define PT_REGS_ES(r) UPT_ES(&(r)->regs) +#define PT_REGS_SS(r) UPT_SS(&(r)->regs) +#define PT_REGS_CS(r) UPT_CS(&(r)->regs) + +#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs) +#define PT_REGS_RIP(r) UPT_IP(&(r)->regs) +#define PT_REGS_RSP(r) UPT_SP(&(r)->regs) + +#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs) + +/* XXX */ +#define user_mode(r) UPT_IS_USER(&(r)->regs) +#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r) +#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r) + +#define PT_FIX_EXEC_STACK(sp) do ; while(0) + +#define profile_pc(regs) PT_REGS_IP(regs) + +static inline int ptrace_get_thread_area(struct task_struct *child, int idx, + struct user_desc __user *user_desc) +{ + return -ENOSYS; +} + +static inline int ptrace_set_thread_area(struct task_struct *child, int idx, + struct user_desc __user *user_desc) +{ + return -ENOSYS; +} + +extern long arch_prctl(struct task_struct *task, int code, + unsigned long __user *addr); +#endif diff --git a/arch/um/include/asm/required-features.h b/arch/um/include/asm/required-features.h new file mode 100644 index 0000000..dfb967b --- /dev/null +++ b/arch/um/include/asm/required-features.h @@ -0,0 +1,9 @@ +#ifndef __UM_REQUIRED_FEATURES_H +#define __UM_REQUIRED_FEATURES_H + +/* + * Nothing to see, just need something for the i386 and x86_64 asm + * headers to include. 
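In the ptrace-x86_64.h hunk above the extra segment registers are addressed by byte offset into the saved register frame, slot 21 onwards. A standalone check of where those offsets land, separate from the patch (it assumes an 8-byte unsigned long, as on an x86_64 host):

```c
#include <stdio.h>

/* offsets as defined above, in bytes from the start of the register frame */
#define FS_BASE (21 * sizeof(unsigned long))
#define GS_BASE (22 * sizeof(unsigned long))
#define DS      (23 * sizeof(unsigned long))
#define ES      (24 * sizeof(unsigned long))
#define FS      (25 * sizeof(unsigned long))
#define GS      (26 * sizeof(unsigned long))

int main(void)
{
	/* with 8-byte longs these land at byte offsets 168 through 208 */
	printf("FS_BASE=%zu GS_BASE=%zu DS=%zu ES=%zu FS=%zu GS=%zu\n",
	       FS_BASE, GS_BASE, DS, ES, FS, GS);
	return 0;
}
```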
+ */ + +#endif diff --git a/arch/um/include/asm/resource.h b/arch/um/include/asm/resource.h new file mode 100644 index 0000000..c9b0740 --- /dev/null +++ b/arch/um/include/asm/resource.h @@ -0,0 +1,6 @@ +#ifndef __UM_RESOURCE_H +#define __UM_RESOURCE_H + +#include "asm/arch/resource.h" + +#endif diff --git a/arch/um/include/asm/rwlock.h b/arch/um/include/asm/rwlock.h new file mode 100644 index 0000000..ff383aa --- /dev/null +++ b/arch/um/include/asm/rwlock.h @@ -0,0 +1,6 @@ +#ifndef __UM_RWLOCK_H +#define __UM_RWLOCK_H + +#include "asm/arch/rwlock.h" + +#endif diff --git a/arch/um/include/asm/rwsem.h b/arch/um/include/asm/rwsem.h new file mode 100644 index 0000000..b5fc449 --- /dev/null +++ b/arch/um/include/asm/rwsem.h @@ -0,0 +1,6 @@ +#ifndef __UM_RWSEM_H__ +#define __UM_RWSEM_H__ + +#include "asm/arch/rwsem.h" + +#endif diff --git a/arch/um/include/asm/scatterlist.h b/arch/um/include/asm/scatterlist.h new file mode 100644 index 0000000..e92016a --- /dev/null +++ b/arch/um/include/asm/scatterlist.h @@ -0,0 +1,6 @@ +#ifndef __UM_SCATTERLIST_H +#define __UM_SCATTERLIST_H + +#include "asm/arch/scatterlist.h" + +#endif diff --git a/arch/um/include/asm/sections.h b/arch/um/include/asm/sections.h new file mode 100644 index 0000000..6b0231e --- /dev/null +++ b/arch/um/include/asm/sections.h @@ -0,0 +1,7 @@ +#ifndef _UM_SECTIONS_H +#define _UM_SECTIONS_H + +/* nothing to see, move along */ +#include <asm-generic/sections.h> + +#endif diff --git a/arch/um/include/asm/segment.h b/arch/um/include/asm/segment.h new file mode 100644 index 0000000..45183fc --- /dev/null +++ b/arch/um/include/asm/segment.h @@ -0,0 +1,10 @@ +#ifndef __UM_SEGMENT_H +#define __UM_SEGMENT_H + +extern int host_gdt_entry_tls_min; + +#define GDT_ENTRY_TLS_ENTRIES 3 +#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min +#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +#endif diff --git a/arch/um/include/asm/sembuf.h b/arch/um/include/asm/sembuf.h new file mode 100644 index 0000000..1ae82c1 --- /dev/null +++ b/arch/um/include/asm/sembuf.h @@ -0,0 +1,6 @@ +#ifndef __UM_SEMBUF_H +#define __UM_SEMBUF_H + +#include "asm/arch/sembuf.h" + +#endif diff --git a/arch/um/include/asm/serial.h b/arch/um/include/asm/serial.h new file mode 100644 index 0000000..61ad07c --- /dev/null +++ b/arch/um/include/asm/serial.h @@ -0,0 +1,6 @@ +#ifndef __UM_SERIAL_H +#define __UM_SERIAL_H + +#include "asm/arch/serial.h" + +#endif diff --git a/arch/um/include/asm/setup.h b/arch/um/include/asm/setup.h new file mode 100644 index 0000000..99f0863 --- /dev/null +++ b/arch/um/include/asm/setup.h @@ -0,0 +1,10 @@ +#ifndef SETUP_H_INCLUDED +#define SETUP_H_INCLUDED + +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the + * command line, so this choice is ok. 
+ */ + +#define COMMAND_LINE_SIZE 4096 + +#endif /* SETUP_H_INCLUDED */ diff --git a/arch/um/include/asm/shmbuf.h b/arch/um/include/asm/shmbuf.h new file mode 100644 index 0000000..9684d4a --- /dev/null +++ b/arch/um/include/asm/shmbuf.h @@ -0,0 +1,6 @@ +#ifndef __UM_SHMBUF_H +#define __UM_SHMBUF_H + +#include "asm/arch/shmbuf.h" + +#endif diff --git a/arch/um/include/asm/shmparam.h b/arch/um/include/asm/shmparam.h new file mode 100644 index 0000000..124c001 --- /dev/null +++ b/arch/um/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef __UM_SHMPARAM_H +#define __UM_SHMPARAM_H + +#include "asm/arch/shmparam.h" + +#endif diff --git a/arch/um/include/asm/sigcontext-generic.h b/arch/um/include/asm/sigcontext-generic.h new file mode 100644 index 0000000..1645870 --- /dev/null +++ b/arch/um/include/asm/sigcontext-generic.h @@ -0,0 +1,6 @@ +#ifndef __UM_SIGCONTEXT_GENERIC_H +#define __UM_SIGCONTEXT_GENERIC_H + +#include "asm/arch/sigcontext.h" + +#endif diff --git a/arch/um/include/asm/sigcontext-i386.h b/arch/um/include/asm/sigcontext-i386.h new file mode 100644 index 0000000..b88333f --- /dev/null +++ b/arch/um/include/asm/sigcontext-i386.h @@ -0,0 +1,6 @@ +#ifndef __UM_SIGCONTEXT_I386_H +#define __UM_SIGCONTEXT_I386_H + +#include "asm/sigcontext-generic.h" + +#endif diff --git a/arch/um/include/asm/sigcontext-ppc.h b/arch/um/include/asm/sigcontext-ppc.h new file mode 100644 index 0000000..2467f20 --- /dev/null +++ b/arch/um/include/asm/sigcontext-ppc.h @@ -0,0 +1,10 @@ +#ifndef __UM_SIGCONTEXT_PPC_H +#define __UM_SIGCONTEXT_PPC_H + +#define pt_regs sys_pt_regs + +#include "asm/sigcontext-generic.h" + +#undef pt_regs + +#endif diff --git a/arch/um/include/asm/sigcontext-x86_64.h b/arch/um/include/asm/sigcontext-x86_64.h new file mode 100644 index 0000000..b600e0b --- /dev/null +++ b/arch/um/include/asm/sigcontext-x86_64.h @@ -0,0 +1,22 @@ +/* Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_SIGCONTEXT_X86_64_H +#define __UM_SIGCONTEXT_X86_64_H + +#include "asm/sigcontext-generic.h" + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/siginfo.h b/arch/um/include/asm/siginfo.h new file mode 100644 index 0000000..bec6124 --- /dev/null +++ b/arch/um/include/asm/siginfo.h @@ -0,0 +1,6 @@ +#ifndef __UM_SIGINFO_H +#define __UM_SIGINFO_H + +#include "asm/arch/siginfo.h" + +#endif diff --git a/arch/um/include/asm/signal.h b/arch/um/include/asm/signal.h new file mode 100644 index 0000000..52ed92c --- /dev/null +++ b/arch/um/include/asm/signal.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __UM_SIGNAL_H +#define __UM_SIGNAL_H + +/* Need to kill the do_signal() declaration in the i386 signal.h */ + +#define do_signal do_signal_renamed +#include "asm/arch/signal.h" +#undef do_signal +#undef ptrace_signal_deliver + +#define ptrace_signal_deliver(regs, cookie) do {} while(0) + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
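The signal.h hunk above hides the native do_signal() declaration with a small preprocessor trick: temporarily #define the name before including the host header so its declaration lands under a different identifier, then #undef it. A minimal standalone imitation of the same pattern, not taken from the kernel (the NATIVE_HEADER_DECLARATION macro stands in for the included header, and all names here are made up):

```c
#include <stdio.h>

/* stand-in for the declaration that the native header would provide */
#define NATIVE_HEADER_DECLARATION int do_signal(void);

/* while the "header" is expanded, do_signal means something else */
#define do_signal do_signal_renamed
NATIVE_HEADER_DECLARATION           /* actually declares do_signal_renamed() */
#undef do_signal

/* now this file is free to supply its own do_signal without a clash */
int do_signal(void)         { return 42; }
int do_signal_renamed(void) { return 0; }

int main(void)
{
	printf("our do_signal() = %d, renamed native stub = %d\n",
	       do_signal(), do_signal_renamed());
	return 0;
}
```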
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h new file mode 100644 index 0000000..f27a963 --- /dev/null +++ b/arch/um/include/asm/smp.h @@ -0,0 +1,33 @@ +#ifndef __UM_SMP_H +#define __UM_SMP_H + +#ifdef CONFIG_SMP + +#include "linux/bitops.h" +#include "asm/current.h" +#include "linux/cpumask.h" + +#define raw_smp_processor_id() (current_thread->cpu) + +#define cpu_logical_map(n) (n) +#define cpu_number_map(n) (n) +#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */ +extern int hard_smp_processor_id(void); +#define NO_PROC_ID -1 + +extern int ncpus; + + +static inline void smp_cpus_done(unsigned int maxcpus) +{ +} + +extern struct task_struct *idle_threads[NR_CPUS]; + +#else + +#define hard_smp_processor_id() 0 + +#endif + +#endif diff --git a/arch/um/include/asm/socket.h b/arch/um/include/asm/socket.h new file mode 100644 index 0000000..67886e4 --- /dev/null +++ b/arch/um/include/asm/socket.h @@ -0,0 +1,6 @@ +#ifndef __UM_SOCKET_H +#define __UM_SOCKET_H + +#include "asm/arch/socket.h" + +#endif diff --git a/arch/um/include/asm/sockios.h b/arch/um/include/asm/sockios.h new file mode 100644 index 0000000..93ee1c5 --- /dev/null +++ b/arch/um/include/asm/sockios.h @@ -0,0 +1,6 @@ +#ifndef __UM_SOCKIOS_H +#define __UM_SOCKIOS_H + +#include "asm/arch/sockios.h" + +#endif diff --git a/arch/um/include/asm/spinlock.h b/arch/um/include/asm/spinlock.h new file mode 100644 index 0000000..f18c828 --- /dev/null +++ b/arch/um/include/asm/spinlock.h @@ -0,0 +1,6 @@ +#ifndef __UM_SPINLOCK_H +#define __UM_SPINLOCK_H + +#include "asm/arch/spinlock.h" + +#endif diff --git a/arch/um/include/asm/spinlock_types.h b/arch/um/include/asm/spinlock_types.h new file mode 100644 index 0000000..e5a9429 --- /dev/null +++ b/arch/um/include/asm/spinlock_types.h @@ -0,0 +1,6 @@ +#ifndef __UM_SPINLOCK_TYPES_H +#define __UM_SPINLOCK_TYPES_H + +#include "asm/arch/spinlock_types.h" + +#endif diff --git a/arch/um/include/asm/stat.h b/arch/um/include/asm/stat.h new file mode 100644 index 0000000..83ed85a --- /dev/null +++ b/arch/um/include/asm/stat.h @@ -0,0 +1,6 @@ +#ifndef __UM_STAT_H +#define __UM_STAT_H + +#include "asm/arch/stat.h" + +#endif diff --git a/arch/um/include/asm/statfs.h b/arch/um/include/asm/statfs.h new file mode 100644 index 0000000..ba6fb53 --- /dev/null +++ b/arch/um/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _UM_STATFS_H +#define _UM_STATFS_H + +#include "asm/arch/statfs.h" + +#endif diff --git a/arch/um/include/asm/string.h b/arch/um/include/asm/string.h new file mode 100644 index 0000000..9a0571f6 --- /dev/null +++ b/arch/um/include/asm/string.h @@ -0,0 +1,7 @@ +#ifndef __UM_STRING_H +#define __UM_STRING_H + +#include "asm/arch/string.h" +#include "asm/archparam.h" + +#endif diff --git a/arch/um/include/asm/suspend.h b/arch/um/include/asm/suspend.h new file mode 100644 index 0000000..f4e8e00 --- /dev/null +++ b/arch/um/include/asm/suspend.h @@ -0,0 +1,4 @@ +#ifndef __UM_SUSPEND_H +#define __UM_SUSPEND_H + +#endif diff --git a/arch/um/include/asm/system-generic.h b/arch/um/include/asm/system-generic.h new file mode 100644 index 0000000..5bcfa35 --- /dev/null +++ b/arch/um/include/asm/system-generic.h @@ -0,0 +1,47 @@ +#ifndef __UM_SYSTEM_GENERIC_H +#define __UM_SYSTEM_GENERIC_H + +#include "asm/arch/system.h" + +#undef switch_to +#undef local_irq_save +#undef local_irq_restore +#undef local_irq_disable +#undef local_irq_enable 
+#undef local_save_flags +#undef local_irq_restore +#undef local_irq_enable +#undef local_irq_disable +#undef local_irq_save +#undef irqs_disabled + +extern void *switch_to(void *prev, void *next, void *last); + +extern int get_signals(void); +extern int set_signals(int enable); +extern int get_signals(void); +extern void block_signals(void); +extern void unblock_signals(void); + +#define local_save_flags(flags) do { typecheck(unsigned long, flags); \ + (flags) = get_signals(); } while(0) +#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \ + set_signals(flags); } while(0) + +#define local_irq_save(flags) do { local_save_flags(flags); \ + local_irq_disable(); } while(0) + +#define local_irq_enable() unblock_signals() +#define local_irq_disable() block_signals() + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + (flags == 0); \ +}) + +extern void *_switch_to(void *prev, void *next, void *last); +#define switch_to(prev, next, last) prev = _switch_to(prev, next, last) + +#endif diff --git a/arch/um/include/asm/system-i386.h b/arch/um/include/asm/system-i386.h new file mode 100644 index 0000000..c436263 --- /dev/null +++ b/arch/um/include/asm/system-i386.h @@ -0,0 +1,6 @@ +#ifndef __UM_SYSTEM_I386_H +#define __UM_SYSTEM_I386_H + +#include "asm/system-generic.h" + +#endif diff --git a/arch/um/include/asm/system-ppc.h b/arch/um/include/asm/system-ppc.h new file mode 100644 index 0000000..17cde66 --- /dev/null +++ b/arch/um/include/asm/system-ppc.h @@ -0,0 +1,12 @@ +#ifndef __UM_SYSTEM_PPC_H +#define __UM_SYSTEM_PPC_H + +#define _switch_to _ppc_switch_to + +#include "asm/arch/system.h" + +#undef _switch_to + +#include "asm/system-generic.h" + +#endif diff --git a/arch/um/include/asm/system-x86_64.h b/arch/um/include/asm/system-x86_64.h new file mode 100644 index 0000000..e1b61b5 --- /dev/null +++ b/arch/um/include/asm/system-x86_64.h @@ -0,0 +1,23 @@ +/* + * Copyright 2003 PathScale, Inc. + * + * Licensed under the GPL + */ + +#ifndef __UM_SYSTEM_X86_64_H +#define __UM_SYSTEM_X86_64_H + +#include "asm/system-generic.h" + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
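system-generic.h maps the usual irq-flag macros onto UML's signal primitives: "interrupts disabled" simply means host signals are blocked, and the saved flags are whatever get_signals() returned. A simplified standalone model of that mapping, separate from the patch (the signal state is faked with a variable, the typecheck() wrapper is dropped, and none of these functions are the real UML ones):

```c
#include <stdio.h>

static int signals_enabled = 1;                 /* fake host-signal state */

static int  get_signals(void)       { return signals_enabled; }
static void set_signals(int enable) { signals_enabled = enable; }
static void block_signals(void)     { signals_enabled = 0; }
static void unblock_signals(void)   { signals_enabled = 1; }

/* simplified versions of the macros in system-generic.h */
#define local_save_flags(flags)  ((flags) = get_signals())
#define local_irq_restore(flags) set_signals(flags)
#define local_irq_enable()       unblock_signals()
#define local_irq_disable()      block_signals()
#define local_irq_save(flags)    do { local_save_flags(flags); \
                                      local_irq_disable(); } while (0)
#define irqs_disabled()          (get_signals() == 0)

int main(void)
{
	unsigned long flags;

	local_irq_save(flags);                       /* "disable interrupts" */
	printf("disabled: %d\n", irqs_disabled());   /* prints 1 */
	local_irq_restore(flags);                    /* back to previous state */
	printf("disabled: %d\n", irqs_disabled());   /* prints 0 */

	local_irq_disable();
	local_irq_enable();
	printf("disabled: %d\n", irqs_disabled());   /* prints 0 */
	return 0;
}
```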
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/asm/termbits.h b/arch/um/include/asm/termbits.h new file mode 100644 index 0000000..5739c60 --- /dev/null +++ b/arch/um/include/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef __UM_TERMBITS_H +#define __UM_TERMBITS_H + +#include "asm/arch/termbits.h" + +#endif diff --git a/arch/um/include/asm/termios.h b/arch/um/include/asm/termios.h new file mode 100644 index 0000000..d9f97b3 --- /dev/null +++ b/arch/um/include/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef __UM_TERMIOS_H +#define __UM_TERMIOS_H + +#include "asm/arch/termios.h" + +#endif diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h new file mode 100644 index 0000000..62274ab --- /dev/null +++ b/arch/um/include/asm/thread_info.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_THREAD_INFO_H +#define __UM_THREAD_INFO_H + +#ifndef __ASSEMBLY__ + +#include <asm/types.h> +#include <asm/page.h> +#include <asm/uaccess.h> + +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ + mm_segment_t addr_limit; /* thread address space: + 0-0xBFFFFFFF for user + 0-0xFFFFFFFF for kernel */ + struct restart_block restart_block; + struct thread_info *real_thread; /* Points to non-IRQ stack */ +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ + .real_thread = NULL, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE) +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + unsigned long mask = THREAD_SIZE - 1; + ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); + return ti; +} + +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER + +#endif + +#define PREEMPT_ACTIVE 0x10000000 + +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling + * TIF_NEED_RESCHED + */ +#define TIF_RESTART_BLOCK 4 +#define TIF_MEMDIE 5 +#define TIF_SYSCALL_AUDIT 6 +#define TIF_RESTORE_SIGMASK 7 +#define TIF_FREEZE 16 /* is freezing for suspend */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_FREEZE (1 << TIF_FREEZE) + +#endif diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h new file mode 100644 index 0000000..0f4ada0 --- /dev/null +++ b/arch/um/include/asm/timex.h @@ -0,0 +1,13 @@ +#ifndef __UM_TIMEX_H +#define __UM_TIMEX_H + +typedef unsigned long cycles_t; + +static 
inline cycles_t get_cycles (void) +{ + return 0; +} + +#define CLOCK_TICK_RATE (HZ) + +#endif diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h new file mode 100644 index 0000000..5240fa1 --- /dev/null +++ b/arch/um/include/asm/tlb.h @@ -0,0 +1,127 @@ +#ifndef __UM_TLB_H +#define __UM_TLB_H + +#include <linux/pagemap.h> +#include <linux/swap.h> +#include <asm/percpu.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +/* struct mmu_gather is an opaque type used by the mm code for passing around + * any data needed by arch specific code for tlb_remove_page. + */ +struct mmu_gather { + struct mm_struct *mm; + unsigned int need_flush; /* Really unmapped some ptes? */ + unsigned long start; + unsigned long end; + unsigned int fullmm; /* non-zero means full mm flush */ +}; + +/* Users of the generic TLB shootdown code must declare this storage space. */ +DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); + +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, + unsigned long address) +{ + if (tlb->start > address) + tlb->start = address; + if (tlb->end < address + PAGE_SIZE) + tlb->end = address + PAGE_SIZE; +} + +static inline void init_tlb_gather(struct mmu_gather *tlb) +{ + tlb->need_flush = 0; + + tlb->start = TASK_SIZE; + tlb->end = 0; + + if (tlb->fullmm) { + tlb->start = 0; + tlb->end = TASK_SIZE; + } +} + +/* tlb_gather_mmu + * Return a pointer to an initialized struct mmu_gather. + */ +static inline struct mmu_gather * +tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) +{ + struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); + + tlb->mm = mm; + tlb->fullmm = full_mm_flush; + + init_tlb_gather(tlb); + + return tlb; +} + +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end); + +static inline void +tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + if (!tlb->need_flush) + return; + + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end); + init_tlb_gather(tlb); +} + +/* tlb_finish_mmu + * Called at the end of the shootdown operation to free up any resources + * that were required. + */ +static inline void +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + tlb_flush_mmu(tlb, start, end); + + /* keep the page table cache within bounds */ + check_pgt_cache(); + + put_cpu_var(mmu_gathers); +} + +/* tlb_remove_page + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), + * while handling the additional races in SMP caused by other CPUs + * caching valid mappings in their TLBs. + */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + tlb->need_flush = 1; + free_page_and_swap_cache(page); + return; +} + +/** + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. + * + * Record the fact that pte's were really umapped in ->need_flush, so we can + * later optimise away the tlb invalidate. This helps when userspace is + * unmapping already-unmapped pages, which happens quite a lot. 
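The mmu_gather above only records whether anything was unmapped and the [start, end) range touched, so the flush at the end can be one flush_tlb_mm_range() call instead of one per page. A standalone model of the range bookkeeping done by init_tlb_gather() and __tlb_remove_tlb_entry(), separate from the patch (the struct, values and demo_* names are illustrative):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE 0xc0000000UL            /* assumed, as in the UML default */

struct demo_gather {
	unsigned long start, end;         /* range that will need flushing */
	int need_flush;
};

/* like init_tlb_gather(): an empty range, start above end */
static void demo_init(struct demo_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;
	tlb->need_flush = 0;
}

/* like __tlb_remove_tlb_entry(): widen the range around each unmapped page */
static void demo_track(struct demo_gather *tlb, unsigned long address)
{
	tlb->need_flush = 1;
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

int main(void)
{
	struct demo_gather tlb;

	demo_init(&tlb);
	demo_track(&tlb, 0x400000);
	demo_track(&tlb, 0x5000);

	/* a single flush now covers both unmapped pages */
	printf("flush 0x%lx..0x%lx (need_flush=%d)\n",
	       tlb.start, tlb.end, tlb.need_flush);
	return 0;
}
```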
+ */ +#define tlb_remove_tlb_entry(tlb, ptep, address) \ + do { \ + tlb->need_flush = 1; \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep) + +#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp) + +#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp) + +#define tlb_migrate_finish(mm) do {} while (0) + +#endif diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h new file mode 100644 index 0000000..614f2c0 --- /dev/null +++ b/arch/um/include/asm/tlbflush.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Licensed under the GPL + */ + +#ifndef __UM_TLBFLUSH_H +#define __UM_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_kernel_vm() flushes the kernel vm area + * - flush_tlb_range(vma, start, end) flushes a range of pages + */ + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address); +extern void flush_tlb_kernel_vm(void); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void __flush_tlb_one(unsigned long addr); + +#endif diff --git a/arch/um/include/asm/topology.h b/arch/um/include/asm/topology.h new file mode 100644 index 0000000..0905e4f --- /dev/null +++ b/arch/um/include/asm/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_UM_TOPOLOGY_H +#define _ASM_UM_TOPOLOGY_H + +#include <asm-generic/topology.h> + +#endif diff --git a/arch/um/include/asm/types.h b/arch/um/include/asm/types.h new file mode 100644 index 0000000..816e959 --- /dev/null +++ b/arch/um/include/asm/types.h @@ -0,0 +1,6 @@ +#ifndef __UM_TYPES_H +#define __UM_TYPES_H + +#include "asm/arch/types.h" + +#endif diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h new file mode 100644 index 0000000..b9a895d --- /dev/null +++ b/arch/um/include/asm/uaccess.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef __UM_UACCESS_H +#define __UM_UACCESS_H + +#include <asm/errno.h> +#include <asm/processor.h> + +/* thread_info has a mm_segment_t in it, so put the definition up here */ +typedef struct { + unsigned long seg; +} mm_segment_t; + +#include "linux/thread_info.h" + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. 
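On UML that limit is just a number compared against the user pointer: USER_DS is TASK_SIZE and KERNEL_DS is ~0, both defined just below, so raising the limit with set_fs(KERNEL_DS) makes every address pass the check. A standalone sketch of that comparison, separate from the patch (demo_access_ok() is a simplified stand-in, not the real access_ok() from um_uaccess.h, and the 3 GB TASK_SIZE is an assumption):

```c
#include <stdio.h>

#define TASK_SIZE 0xc0000000UL    /* assumed 3 GB user space */
#define USER_DS   TASK_SIZE
#define KERNEL_DS 0xffffffffUL

static unsigned long addr_limit = USER_DS;    /* per thread in the real code */

/* simplified stand-in for access_ok(): is [addr, addr+size) under the limit? */
static int demo_access_ok(unsigned long addr, unsigned long size)
{
	return size <= addr_limit && addr <= addr_limit - size;
}

int main(void)
{
	unsigned long kernel_ptr = 0xd0000000UL;  /* above the user limit */

	printf("USER_DS:   ok=%d\n", demo_access_ok(kernel_ptr, 4));  /* 0 */
	addr_limit = KERNEL_DS;                   /* what set_fs(KERNEL_DS) does */
	printf("KERNEL_DS: ok=%d\n", demo_access_ok(kernel_ptr, 4));  /* 1 */
	return 0;
}
```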
+ */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) +#define USER_DS MAKE_MM_SEG(TASK_SIZE) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a, b) ((a).seg == (b).seg) + +#include "um_uaccess.h" + +#define __copy_from_user(to, from, n) copy_from_user(to, from, n) + +#define __copy_to_user(to, from, n) copy_to_user(to, from, n) + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \ + __typeof__(x) __private_val; \ + int __private_ret = -EFAULT; \ + (x) = (__typeof__(*(__private_ptr)))0; \ + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\ + sizeof(*(__private_ptr))) == 0) { \ + (x) = (__typeof__(*(__private_ptr))) __private_val; \ + __private_ret = 0; \ + } \ + __private_ret; \ +}) + +#define get_user(x, ptr) \ +({ \ + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \ + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \ + __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \ +}) + +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__private_ptr = ptr; \ + __typeof__(*(__private_ptr)) __private_val; \ + int __private_ret = -EFAULT; \ + __private_val = (__typeof__(*(__private_ptr))) (x); \ + if (__copy_to_user((__private_ptr), &__private_val, \ + sizeof(*(__private_ptr))) == 0) { \ + __private_ret = 0; \ + } \ + __private_ret; \ +}) + +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *private_ptr = (ptr); \ + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? 
\ + __put_user(x, private_ptr) : -EFAULT); \ +}) + +#define strlen_user(str) strnlen_user(str, ~0U >> 1) + +struct exception_table_entry +{ + unsigned long insn; + unsigned long fixup; +}; + +#endif diff --git a/arch/um/include/asm/ucontext.h b/arch/um/include/asm/ucontext.h new file mode 100644 index 0000000..5c96c0e --- /dev/null +++ b/arch/um/include/asm/ucontext.h @@ -0,0 +1,6 @@ +#ifndef _ASM_UM_UCONTEXT_H +#define _ASM_UM_UCONTEXT_H + +#include "asm/arch/ucontext.h" + +#endif diff --git a/arch/um/include/asm/unaligned.h b/arch/um/include/asm/unaligned.h new file mode 100644 index 0000000..a471969 --- /dev/null +++ b/arch/um/include/asm/unaligned.h @@ -0,0 +1,6 @@ +#ifndef _ASM_UM_UNALIGNED_H +#define _ASM_UM_UNALIGNED_H + +#include "asm/arch/unaligned.h" + +#endif /* _ASM_UM_UNALIGNED_H */ diff --git a/arch/um/include/asm/unistd.h b/arch/um/include/asm/unistd.h new file mode 100644 index 0000000..38bd9d9 --- /dev/null +++ b/arch/um/include/asm/unistd.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2000 - 2004 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef _UM_UNISTD_H_ +#define _UM_UNISTD_H_ + +#include <linux/syscalls.h> +#include "linux/resource.h" +#include "asm/uaccess.h" + +extern int um_execve(const char *file, char *const argv[], char *const env[]); + +#ifdef __KERNEL__ +/* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */ +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND +#endif + +#include "asm/arch/unistd.h" + +#endif /* _UM_UNISTD_H_*/ diff --git a/arch/um/include/asm/user.h b/arch/um/include/asm/user.h new file mode 100644 index 0000000..aae414e --- /dev/null +++ b/arch/um/include/asm/user.h @@ -0,0 +1,6 @@ +#ifndef __UM_USER_H +#define __UM_USER_H + +#include "asm/arch/user.h" + +#endif diff --git a/arch/um/include/asm/vga.h b/arch/um/include/asm/vga.h new file mode 100644 index 0000000..903a592 --- /dev/null +++ b/arch/um/include/asm/vga.h @@ -0,0 +1,6 @@ +#ifndef __UM_VGA_H +#define __UM_VGA_H + +#include "asm/arch/vga.h" + +#endif diff --git a/arch/um/include/asm/vm-flags-i386.h b/arch/um/include/asm/vm-flags-i386.h new file mode 100644 index 0000000..e0d24c5 --- /dev/null +++ b/arch/um/include/asm/vm-flags-i386.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) + * Licensed under the GPL + */ + +#ifndef __VM_FLAGS_I386_H +#define __VM_FLAGS_I386_H + +#define VM_DATA_DEFAULT_FLAGS \ + (VM_READ | VM_WRITE | \ + ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#endif diff --git a/arch/um/include/asm/vm-flags-x86_64.h b/arch/um/include/asm/vm-flags-x86_64.h new file mode 100644 index 0000000..3213edf --- /dev/null +++ b/arch/um/include/asm/vm-flags-x86_64.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) + * Copyright 2003 PathScale, Inc. 
+ * Licensed under the GPL + */ + +#ifndef __VM_FLAGS_X86_64_H +#define __VM_FLAGS_X86_64_H + +#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \ + VM_EXEC | VM_MAYREAD | VM_MAYWRITE | \ + VM_MAYEXEC) + +extern unsigned long vm_stack_flags, vm_stack_flags32; +extern unsigned long vm_data_default_flags, vm_data_default_flags32; +extern unsigned long vm_force_exec32; + +#ifdef TIF_IA32 +#define VM_DATA_DEFAULT_FLAGS \ + (test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \ + vm_data_default_flags) + +#define VM_STACK_DEFAULT_FLAGS \ + (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags) +#endif + +#define VM_DATA_DEFAULT_FLAGS vm_data_default_flags + +#define VM_STACK_DEFAULT_FLAGS vm_stack_flags + +#endif diff --git a/arch/um/include/asm/vm86.h b/arch/um/include/asm/vm86.h new file mode 100644 index 0000000..7801f82 --- /dev/null +++ b/arch/um/include/asm/vm86.h @@ -0,0 +1,6 @@ +#ifndef __UM_VM86_H +#define __UM_VM86_H + +#include "asm/arch/vm86.h" + +#endif diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h new file mode 100644 index 0000000..a19db3e --- /dev/null +++ b/arch/um/include/asm/xor.h @@ -0,0 +1,6 @@ +#ifndef __UM_XOR_H +#define __UM_XOR_H + +#include "asm-generic/xor.h" + +#endif |
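To close the diff, the vm-flags headers show where the i386 and x86_64 variants diverge: on i386 VM_EXEC is added to the default data flags only when the task's personality sets READ_IMPLIES_EXEC, while the x86_64 version picks between runtime variables keyed off TIF_IA32. A standalone sketch of the i386 rule, separate from the patch (the VM_* and personality constants are hardcoded stand-ins and the VM_MAY* bits are omitted for brevity):

```c
#include <stdio.h>

/* hardcoded stand-ins for the kernel's constants */
#define VM_READ           0x1
#define VM_WRITE          0x2
#define VM_EXEC           0x4
#define READ_IMPLIES_EXEC 0x0400000

/* same shape as VM_DATA_DEFAULT_FLAGS in vm-flags-i386.h, minus VM_MAY* */
static unsigned long demo_vm_data_default_flags(unsigned long personality)
{
	return VM_READ | VM_WRITE |
	       ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0);
}

int main(void)
{
	printf("plain personality:  0x%lx\n", demo_vm_data_default_flags(0));
	printf("READ_IMPLIES_EXEC:  0x%lx\n",
	       demo_vm_data_default_flags(READ_IMPLIES_EXEC));
	return 0;
}
```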