author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m32r/mm
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/m32r/mm')
-rw-r--r-- | arch/m32r/mm/Makefile | 12
-rw-r--r-- | arch/m32r/mm/cache.c | 65
-rw-r--r-- | arch/m32r/mm/discontig.c | 171
-rw-r--r-- | arch/m32r/mm/extable.c | 22
-rw-r--r-- | arch/m32r/mm/fault-nommu.c | 165
-rw-r--r-- | arch/m32r/mm/fault.c | 583
-rw-r--r-- | arch/m32r/mm/init.c | 247
-rw-r--r-- | arch/m32r/mm/ioremap-nommu.c | 52
-rw-r--r-- | arch/m32r/mm/ioremap.c | 192
-rw-r--r-- | arch/m32r/mm/mmu.S | 350
-rw-r--r-- | arch/m32r/mm/page.S | 82
11 files changed, 1941 insertions(+), 0 deletions(-)
diff --git a/arch/m32r/mm/Makefile b/arch/m32r/mm/Makefile new file mode 100644 index 0000000..c51c1c3 --- /dev/null +++ b/arch/m32r/mm/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Linux M32R-specific parts of the memory manager. +# + +ifdef CONFIG_MMU +obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o +else +obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o +endif + +obj-$(CONFIG_DISCONTIGMEM) += discontig.o + diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c new file mode 100644 index 0000000..31b0789 --- /dev/null +++ b/arch/m32r/mm/cache.c @@ -0,0 +1,65 @@ +/* + * linux/arch/m32r/mm/cache.c + * + * Copyright (C) 2002 Hirokazu Takata + */ + +#include <linux/config.h> +#include <asm/pgtable.h> + +#undef MCCR + +#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP) +/* Cache Control Register */ +#define MCCR ((volatile unsigned long*)0xfffffffc) +#define MCCR_CC (1UL << 7) /* Cache mode modify bit */ +#define MCCR_IIV (1UL << 6) /* I-cache invalidate */ +#define MCCR_DIV (1UL << 5) /* D-cache invalidate */ +#define MCCR_DCB (1UL << 4) /* D-cache copy back */ +#define MCCR_ICM (1UL << 1) /* I-cache mode [0:off,1:on] */ +#define MCCR_DCM (1UL << 0) /* D-cache mode [0:off,1:on] */ +#define MCCR_ICACHE_INV (MCCR_CC|MCCR_IIV) +#define MCCR_DCACHE_CB (MCCR_CC|MCCR_DCB) +#define MCCR_DCACHE_CBINV (MCCR_CC|MCCR_DIV|MCCR_DCB) +#define CHECK_MCCR(mccr) (mccr = *MCCR) +#elif defined(CONFIG_CHIP_M32102) +#define MCCR ((volatile unsigned char*)0xfffffffe) +#define MCCR_IIV (1UL << 0) /* I-cache invalidate */ +#define MCCR_ICACHE_INV MCCR_IIV +#endif /* CONFIG_CHIP_XNUX2 || CONFIG_CHIP_M32700 */ + +#ifndef MCCR +#error Unknown cache type. +#endif + + +/* Copy back and invalidate D-cache and invalidate I-cache all */ +void _flush_cache_all(void) +{ +#if defined(CONFIG_CHIP_M32102) + *MCCR = MCCR_ICACHE_INV; +#else + unsigned long mccr; + + /* Copyback and invalidate D-cache */ + /* Invalidate I-cache */ + *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV; + while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ +#endif +} + +/* Copy back D-cache and invalidate I-cache all */ +void _flush_cache_copyback_all(void) +{ +#if defined(CONFIG_CHIP_M32102) + *MCCR = MCCR_ICACHE_INV; +#else + unsigned long mccr; + + /* Copyback D-cache */ + /* Invalidate I-cache */ + *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB; + while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... 
*/ + +#endif +} diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c new file mode 100644 index 0000000..1d1a01e --- /dev/null +++ b/arch/m32r/mm/discontig.c @@ -0,0 +1,171 @@ +/* + * linux/arch/m32r/mm/discontig.c + * + * Discontig memory support + * + * Copyright (c) 2003 Hitoshi Yamamoto + */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/bootmem.h> +#include <linux/mmzone.h> +#include <linux/initrd.h> +#include <linux/nodemask.h> + +#include <asm/setup.h> + +extern char _end[]; + +struct pglist_data *node_data[MAX_NUMNODES]; +static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata; + +pg_data_t m32r_node_data[MAX_NUMNODES]; + +/* Memory profile */ +typedef struct { + unsigned long start_pfn; + unsigned long pages; + unsigned long holes; + unsigned long free_pfn; +} mem_prof_t; +static mem_prof_t mem_prof[MAX_NUMNODES]; + +static void __init mem_prof_init(void) +{ + unsigned long start_pfn, holes, free_pfn; + const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1); + unsigned long ul; + mem_prof_t *mp; + + /* Node#0 SDRAM */ + mp = &mem_prof[0]; + mp->start_pfn = PFN_UP(CONFIG_MEMORY_START); + mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE); + mp->holes = 0; + mp->free_pfn = PFN_UP(__pa(_end)); + + /* Node#1 internal SRAM */ + mp = &mem_prof[1]; + start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START); + holes = 0; + if (start_pfn & (zone_alignment - 1)) { + ul = zone_alignment; + while (start_pfn >= ul) + ul += zone_alignment; + + start_pfn = ul - zone_alignment; + holes = free_pfn - start_pfn; + } + + mp->start_pfn = start_pfn; + mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes; + mp->holes = holes; + mp->free_pfn = PFN_UP(CONFIG_IRAM_START); +} + +unsigned long __init setup_memory(void) +{ + unsigned long bootmap_size; + unsigned long min_pfn; + int nid; + mem_prof_t *mp; + + max_low_pfn = 0; + min_low_pfn = -1; + + mem_prof_init(); + + for_each_online_node(nid) { + mp = &mem_prof[nid]; + NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid]; + NODE_DATA(nid)->bdata = &node_bdata[nid]; + min_pfn = mp->start_pfn; + max_pfn = mp->start_pfn + mp->pages; + bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn, + mp->start_pfn, max_pfn); + + free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn), + PFN_PHYS(mp->pages)); + + reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn), + PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size); + + if (max_low_pfn < max_pfn) + max_low_pfn = max_pfn; + + if (min_low_pfn > min_pfn) + min_low_pfn = min_pfn; + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (LOADER_TYPE && INITRD_START) { + if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) { + reserve_bootmem_node(NODE_DATA(0), INITRD_START, + INITRD_SIZE); + initrd_start = INITRD_START ? 
+ INITRD_START + PAGE_OFFSET : 0; + + initrd_end = initrd_start + INITRD_SIZE; + printk("initrd:start[%08lx],size[%08lx]\n", + initrd_start, INITRD_SIZE); + } else { + printk("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + INITRD_START + INITRD_SIZE, + PFN_PHYS(max_low_pfn)); + + initrd_start = 0; + } + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + return max_low_pfn; +} + +#define START_PFN(nid) \ + (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT) +#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) + +unsigned long __init zone_sizes_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES]; + unsigned long low, start_pfn; + unsigned long holes = 0; + int nid, i; + mem_prof_t *mp; + + pgdat_list = NULL; + for (nid = num_online_nodes() - 1 ; nid >= 0 ; nid--) { + NODE_DATA(nid)->pgdat_next = pgdat_list; + pgdat_list = NODE_DATA(nid); + } + + for_each_online_node(nid) { + mp = &mem_prof[nid]; + for (i = 0 ; i < MAX_NR_ZONES ; i++) { + zones_size[i] = 0; + zholes_size[i] = 0; + } + start_pfn = START_PFN(nid); + low = MAX_LOW_PFN(nid); + zones_size[ZONE_DMA] = low - start_pfn; + zholes_size[ZONE_DMA] = mp->holes; + holes += zholes_size[ZONE_DMA]; + + free_area_init_node(nid, NODE_DATA(nid), zones_size, + start_pfn, zholes_size); + } + + /* + * For test + * Use all area of internal RAM. + * see __alloc_pages() + */ + NODE_DATA(1)->node_zones->pages_min = 0; + NODE_DATA(1)->node_zones->pages_low = 0; + NODE_DATA(1)->node_zones->pages_high = 0; + + return holes; +} + diff --git a/arch/m32r/mm/extable.c b/arch/m32r/mm/extable.c new file mode 100644 index 0000000..9a97363 --- /dev/null +++ b/arch/m32r/mm/extable.c @@ -0,0 +1,22 @@ +/* + * linux/arch/i386/mm/extable.c + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <asm/uaccess.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(regs->bpc); + if (fixup) { + regs->bpc = fixup->fixup; + return 1; + } + + return 0; +} + diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c new file mode 100644 index 0000000..d9d488d --- /dev/null +++ b/arch/m32r/mm/fault-nommu.c @@ -0,0 +1,165 @@ +/* + * linux/arch/m32r/mm/fault.c + * + * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo + * + * Some code taken from i386 version. 
+ * Copyright (C) 1995 Linus Torvalds + */ + +/* $Id: fault-nommu.c,v 1.1 2004/03/30 06:40:59 sakugawa Exp $ */ + +#include <linux/config.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/vt_kern.h> /* For unblank_screen() */ + +#include <asm/m32r.h> +#include <asm/system.h> +#include <asm/uaccess.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> + +extern void die(const char *, struct pt_regs *, long); + +#ifndef CONFIG_SMP +asmlinkage unsigned int tlb_entry_i_dat; +asmlinkage unsigned int tlb_entry_d_dat; +#define tlb_entry_i tlb_entry_i_dat +#define tlb_entry_d tlb_entry_d_dat +#else +unsigned int tlb_entry_i_dat[NR_CPUS]; +unsigned int tlb_entry_d_dat[NR_CPUS]; +#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()] +#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()] +#endif + +/* + * Unlock any spinlocks which will prevent us from getting the + * message out + */ +void bust_spinlocks(int yes) +{ + int loglevel_save = console_loglevel; + + if (yes) { + oops_in_progress = 1; + return; + } +#ifdef CONFIG_VT + unblank_screen(); +#endif + oops_in_progress = 0; + /* + * OK, the message is on the console. Now we call printk() + * without oops_in_progress set so that printk will give klogd + * a poke. Hold onto your hats... + */ + console_loglevel = 15; /* NMI oopser may have shut the console up */ + printk(" "); + console_loglevel = loglevel_save; +} + +void do_BUG(const char *file, int line) +{ + bust_spinlocks(1); + printk("kernel BUG at %s:%d!\n", file, line); +} + +/*======================================================================* + * do_page_fault() + *======================================================================* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + * + * ARGUMENT: + * regs : M32R SP reg. + * error_code : See below + * address : M32R MMU MDEVA reg. (Operand ACE) + * : M32R BPC reg. (Instruction ACE) + * + * error_code : + * bit 0 == 0 means no page found, 1 means protection fault + * bit 1 == 0 means read, 1 means write + * bit 2 == 0 means kernel, 1 means user-mode + *======================================================================*/ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + +/* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. 
+ */ + + bust_spinlocks(1); + + if (address < PAGE_SIZE) + printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT "Unable to handle kernel paging request"); + printk(" at virtual address %08lx\n",address); + printk(" printing bpc:\n"); + printk(KERN_ALERT "bpc = %08lx\n", regs->bpc); + + die("Oops", regs, error_code); + bust_spinlocks(0); + do_exit(SIGKILL); +} + +/*======================================================================* + * update_mmu_cache() + *======================================================================*/ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t pte) +{ + BUG(); +} + +/*======================================================================* + * flush_tlb_page() : flushes one page + *======================================================================*/ +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + BUG(); +} + +/*======================================================================* + * flush_tlb_range() : flushes a range of pages + *======================================================================*/ +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + BUG(); +} + +/*======================================================================* + * flush_tlb_mm() : flushes the specified mm context TLB's + *======================================================================*/ +void local_flush_tlb_mm(struct mm_struct *mm) +{ + BUG(); +} + +/*======================================================================* + * flush_tlb_all() : flushes all processes TLBs + *======================================================================*/ +void local_flush_tlb_all(void) +{ + BUG(); +} + diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c new file mode 100644 index 0000000..bf7fb58 --- /dev/null +++ b/arch/m32r/mm/fault.c @@ -0,0 +1,583 @@ +/* + * linux/arch/m32r/mm/fault.c + * + * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo + * Copyright (c) 2004 Naoto Sugai, NIIBE Yutaka + * + * Some code taken from i386 version. 
+ * Copyright (C) 1995 Linus Torvalds + */ + +#include <linux/config.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/tty.h> +#include <linux/vt_kern.h> /* For unblank_screen() */ +#include <linux/highmem.h> +#include <linux/module.h> + +#include <asm/m32r.h> +#include <asm/system.h> +#include <asm/uaccess.h> +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> + +extern void die(const char *, struct pt_regs *, long); + +#ifndef CONFIG_SMP +asmlinkage unsigned int tlb_entry_i_dat; +asmlinkage unsigned int tlb_entry_d_dat; +#define tlb_entry_i tlb_entry_i_dat +#define tlb_entry_d tlb_entry_d_dat +#else +unsigned int tlb_entry_i_dat[NR_CPUS]; +unsigned int tlb_entry_d_dat[NR_CPUS]; +#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()] +#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()] +#endif + +extern void init_tlb(void); + +/* + * Unlock any spinlocks which will prevent us from getting the + * message out + */ +void bust_spinlocks(int yes) +{ + int loglevel_save = console_loglevel; + + if (yes) { + oops_in_progress = 1; + return; + } +#ifdef CONFIG_VT + unblank_screen(); +#endif + oops_in_progress = 0; + /* + * OK, the message is on the console. Now we call printk() + * without oops_in_progress set so that printk will give klogd + * a poke. Hold onto your hats... + */ + console_loglevel = 15; /* NMI oopser may have shut the console up */ + printk(" "); + console_loglevel = loglevel_save; +} + +/*======================================================================* + * do_page_fault() + *======================================================================* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + * + * ARGUMENT: + * regs : M32R SP reg. + * error_code : See below + * address : M32R MMU MDEVA reg. (Operand ACE) + * : M32R BPC reg. (Instruction ACE) + * + * error_code : + * bit 0 == 0 means no page found, 1 means protection fault + * bit 1 == 0 means read, 1 means write + * bit 2 == 0 means kernel, 1 means user-mode + * bit 3 == 0 means data, 1 means instruction + *======================================================================*/ +#define ACE_PROTECTION 1 +#define ACE_WRITE 2 +#define ACE_USERMODE 4 +#define ACE_INSTRUCTION 8 + +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct * vma; + unsigned long page, addr; + int write; + siginfo_t info; + + /* + * If BPSW IE bit enable --> set PSW IE bit + */ + if (regs->psw & M32R_PSW_BIE) + local_irq_enable(); + + tsk = current; + + info.si_code = SEGV_MAPERR; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + * + * This verifies that the fault happens in kernel space + * (error_code & ACE_USERMODE) == 0, and that the fault was not a + * protection error (error_code & ACE_PROTECTION) == 0. 
+ */ + if (address >= TASK_SIZE && !(error_code & ACE_USERMODE)) + goto vmalloc_fault; + + mm = tsk->mm; + + /* + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ + if (in_atomic() || !mm) + goto bad_area_nosemaphore; + + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the + * kernel and should generate an OOPS. Unfortunatly, in the case of an + * erroneous fault occuring in a code path which already holds mmap_sem + * we will deadlock attempting to validate the fault against the + * address space. Luckily the kernel only validly references user + * space from well defined areas of code, which are listed in the + * exceptions table. + * + * As the vast majority of faults will be valid we will only perform + * the source reference check when there is a possibilty of a deadlock. + * Attempt to lock the address space, if we cannot we then validate the + * source. If this is invalid we can skip the address space check, + * thus avoiding the deadlock. + */ + if (!down_read_trylock(&mm->mmap_sem)) { + if ((error_code & ACE_USERMODE) == 0 && + !search_exception_tables(regs->psw)) + goto bad_area_nosemaphore; + down_read(&mm->mmap_sem); + } + + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; +#if 0 + if (error_code & ACE_USERMODE) { + /* + * accessing the stack below "spu" is always a bug. + * The "+ 4" is there due to the push instruction + * doing pre-decrement on the stack and that + * doesn't show up until later.. + */ + if (address + 4 < regs->spu) + goto bad_area; + } +#endif + if (expand_stack(vma, address)) + goto bad_area; +/* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + info.si_code = SEGV_ACCERR; + write = 0; + switch (error_code & (ACE_WRITE|ACE_PROTECTION)) { + default: /* 3: write, present */ + /* fall through */ + case ACE_WRITE: /* write, not present */ + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + write++; + break; + case ACE_PROTECTION: /* read, present */ + case 0: /* read, not present */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; + } + + /* + * For instruction access exception, check if the area is executable + */ + if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC)) + goto bad_area; + +survive: + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + addr = (address & PAGE_MASK); + set_thread_fault_code(error_code); + switch (handle_mm_fault(mm, vma, addr, write)) { + case VM_FAULT_MINOR: + tsk->min_flt++; + break; + case VM_FAULT_MAJOR: + tsk->maj_flt++; + break; + case VM_FAULT_SIGBUS: + goto do_sigbus; + case VM_FAULT_OOM: + goto out_of_memory; + default: + BUG(); + } + set_thread_fault_code(0); + up_read(&mm->mmap_sem); + return; + +/* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. 
+ */ +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (error_code & ACE_USERMODE) { + tsk->thread.address = address; + tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.trap_no = 14; + info.si_signo = SIGSEGV; + info.si_errno = 0; + /* info.si_code has been set above */ + info.si_addr = (void __user *)address; + force_sig_info(SIGSEGV, &info, tsk); + return; + } + +no_context: + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + +/* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + + bust_spinlocks(1); + + if (address < PAGE_SIZE) + printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT "Unable to handle kernel paging request"); + printk(" at virtual address %08lx\n",address); + printk(KERN_ALERT " printing bpc:\n"); + printk("%08lx\n", regs->bpc); + page = *(unsigned long *)MPTB; + page = ((unsigned long *) page)[address >> PGDIR_SHIFT]; + printk(KERN_ALERT "*pde = %08lx\n", page); + if (page & _PAGE_PRESENT) { + page &= PAGE_MASK; + address &= 0x003ff000; + page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; + printk(KERN_ALERT "*pte = %08lx\n", page); + } + die("Oops", regs, error_code); + bust_spinlocks(0); + do_exit(SIGKILL); + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + up_read(&mm->mmap_sem); + if (tsk->pid == 1) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: killing process %s\n", tsk->comm); + if (error_code & ACE_USERMODE) + do_exit(SIGKILL); + goto no_context; + +do_sigbus: + up_read(&mm->mmap_sem); + + /* Kernel mode? Handle exception or die */ + if (!(error_code & ACE_USERMODE)) + goto no_context; + + tsk->thread.address = address; + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 14; + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *)address; + force_sig_info(SIGBUS, &info, tsk); + return; + +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + int offset = pgd_index(address); + pgd_t *pgd, *pgd_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + pgd = (pgd_t *)*(unsigned long *)MPTB; + pgd = offset + (pgd_t *)pgd; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) + goto no_context; + + /* + * set_pgd(pgd, *pgd_k); here would be useless on PAE + * and redundant with the set_pmd() on non-PAE. 
+ */ + + pmd = pmd_offset(pgd, address); + pmd_k = pmd_offset(pgd_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + + addr = (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION); + update_mmu_cache(NULL, addr, *pte_k); + return; + } +} + +/*======================================================================* + * update_mmu_cache() + *======================================================================*/ +#define TLB_MASK (NR_TLB_ENTRIES - 1) +#define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8)) +#define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8)) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, + pte_t pte) +{ + unsigned long *entry1, *entry2; + unsigned long pte_data, flags; + unsigned int *entry_dat; + int inst = get_thread_fault_code() & ACE_INSTRUCTION; + int i; + + /* Ptrace may call this routine. */ + if (vma && current->active_mm != vma->vm_mm) + return; + + local_irq_save(flags); + + vaddr = (vaddr & PAGE_MASK) | get_asid(); + +#ifdef CONFIG_CHIP_OPSP + entry1 = (unsigned long *)ITLB_BASE; + for(i = 0 ; i < NR_TLB_ENTRIES; i++) { + if(*entry1++ == vaddr) { + pte_data = pte_val(pte); + set_tlb_data(entry1, pte_data); + break; + } + entry1++; + } + entry2 = (unsigned long *)DTLB_BASE; + for(i = 0 ; i < NR_TLB_ENTRIES ; i++) { + if(*entry2++ == vaddr) { + pte_data = pte_val(pte); + set_tlb_data(entry2, pte_data); + break; + } + entry2++; + } + local_irq_restore(flags); + return; +#else + pte_data = pte_val(pte); + + /* + * Update TLB entries + * entry1: ITLB entry address + * entry2: DTLB entry address + */ + __asm__ __volatile__ ( + "seth %0, #high(%4) \n\t" + "st %2, @(%5, %0) \n\t" + "ldi %1, #1 \n\t" + "st %1, @(%6, %0) \n\t" + "add3 r4, %0, %7 \n\t" + ".fillinsn \n" + "1: \n\t" + "ld %1, @(%6, %0) \n\t" + "bnez %1, 1b \n\t" + "ld %0, @r4+ \n\t" + "ld %1, @r4 \n\t" + "st %3, @+%0 \n\t" + "st %3, @+%1 \n\t" + : "=&r" (entry1), "=&r" (entry2) + : "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE), + "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset) + : "r4", "memory" + ); + + if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END)) + goto notfound; + +found: + local_irq_restore(flags); + + return; + + /* Valid entry not found */ +notfound: + /* + * Update ITLB or DTLB entry + * entry1: TLB entry address + * entry2: TLB base address + */ + if (!inst) { + entry2 = (unsigned long *)DTLB_BASE; + entry_dat = &tlb_entry_d; + } else { + entry2 = (unsigned long *)ITLB_BASE; + entry_dat = &tlb_entry_i; + } + entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1); + + for (i = 0 ; i < NR_TLB_ENTRIES ; i++) { + if (!(entry1[1] & 2)) /* Valid bit check */ + break; + + if (entry1 != entry2) + entry1 -= 2; + else + entry1 += TLB_MASK << 1; + } + + if (i >= NR_TLB_ENTRIES) { /* Empty entry not found */ + entry1 = entry2 + (*entry_dat << 1); + *entry_dat = (*entry_dat + 1) & TLB_MASK; + } + *entry1++ = vaddr; /* Set TLB tag */ + set_tlb_data(entry1, pte_data); + + goto found; +#endif +} + +/*======================================================================* + * flush_tlb_page() : flushes one page + *======================================================================*/ +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) { + unsigned long flags; + + local_irq_save(flags); + page &= PAGE_MASK; + page |= 
(mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK); + __flush_tlb_page(page); + local_irq_restore(flags); + } +} + +/*======================================================================* + * flush_tlb_range() : flushes a range of pages + *======================================================================*/ +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm; + + mm = vma->vm_mm; + if (mm_context(mm) != NO_CONTEXT) { + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB to flush */ + mm_context(mm) = NO_CONTEXT; + if (mm == current->mm) + activate_context(mm); + } else { + unsigned long asid; + + asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK; + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + + start |= asid; + end |= asid; + while (start < end) { + __flush_tlb_page(start); + start += PAGE_SIZE; + } + } + local_irq_restore(flags); + } +} + +/*======================================================================* + * flush_tlb_mm() : flushes the specified mm context TLB's + *======================================================================*/ +void local_flush_tlb_mm(struct mm_struct *mm) +{ + /* Invalidate all TLB of this process. */ + /* Instead of invalidating each TLB, we get new MMU context. */ + if (mm_context(mm) != NO_CONTEXT) { + unsigned long flags; + + local_irq_save(flags); + mm_context(mm) = NO_CONTEXT; + if (mm == current->mm) + activate_context(mm); + local_irq_restore(flags); + } +} + +/*======================================================================* + * flush_tlb_all() : flushes all processes TLBs + *======================================================================*/ +void local_flush_tlb_all(void) +{ + unsigned long flags; + + local_irq_save(flags); + __flush_tlb_all(); + local_irq_restore(flags); +} + +/*======================================================================* + * init_mmu() + *======================================================================*/ +void __init init_mmu(void) +{ + tlb_entry_i = 0; + tlb_entry_d = 0; + mmu_context_cache = MMU_CONTEXT_FIRST_VERSION; + set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK); + *(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir; +} diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c new file mode 100644 index 0000000..bc423d8 --- /dev/null +++ b/arch/m32r/mm/init.c @@ -0,0 +1,247 @@ +/* + * linux/arch/m32r/mm/init.c + * + * Copyright (c) 2001, 2002 Hitoshi Yamamoto + * + * Some code taken from sh version. 
+ * Copyright (C) 1999 Niibe Yutaka + * Based on linux/arch/i386/mm/init.c: + * Copyright (C) 1995 Linus Torvalds + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/bootmem.h> +#include <linux/swap.h> +#include <linux/highmem.h> +#include <linux/bitops.h> +#include <linux/nodemask.h> +#include <asm/types.h> +#include <asm/processor.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/mmu_context.h> +#include <asm/setup.h> +#include <asm/tlb.h> + +/* References to section boundaries */ +extern char _text, _etext, _edata; +extern char __init_begin, __init_end; + +pgd_t swapper_pg_dir[1024]; + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +void show_mem(void) +{ + int total = 0, reserved = 0; + int shared = 0, cached = 0; + int highmem = 0; + struct page *page; + pg_data_t *pgdat; + unsigned long i; + + printk("Mem-info:\n"); + show_free_areas(); + printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); + for_each_pgdat(pgdat) { + for (i = 0; i < pgdat->node_spanned_pages; ++i) { + page = pgdat->node_mem_map + i; + total++; + if (PageHighMem(page)) + highmem++; + if (PageReserved(page)) + reserved++; + else if (PageSwapCache(page)) + cached++; + else if (page_count(page)) + shared += page_count(page) - 1; + } + } + printk("%d pages of RAM\n", total); + printk("%d pages of HIGHMEM\n",highmem); + printk("%d reserved pages\n",reserved); + printk("%d pages shared\n",shared); + printk("%d pages swap cached\n",cached); +} + +/* + * Cache of MMU context last used. + */ +#ifndef CONFIG_SMP +unsigned long mmu_context_cache_dat; +#else +unsigned long mmu_context_cache_dat[NR_CPUS]; +#endif +static unsigned long hole_pages; + +/* + * function prototype + */ +void __init paging_init(void); +void __init mem_init(void); +void free_initmem(void); +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long, unsigned long); +#endif + +/* It'd be good if these lines were in the standard header file. */ +#define START_PFN(nid) \ + (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT) +#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn) + +#ifndef CONFIG_DISCONTIGMEM +unsigned long __init zone_sizes_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; + unsigned long max_dma; + unsigned long low; + unsigned long start_pfn; + +#ifdef CONFIG_MMU + start_pfn = START_PFN(0); + max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; + low = MAX_LOW_PFN(0); + + if (low < max_dma){ + zones_size[ZONE_DMA] = low - start_pfn; + zones_size[ZONE_NORMAL] = 0; + } else { + zones_size[ZONE_DMA] = low - start_pfn; + zones_size[ZONE_NORMAL] = low - max_dma; + } +#else + zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; + zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; + start_pfn = __MEMORY_START >> PAGE_SHIFT; +#endif /* CONFIG_MMU */ + + free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn, 0); + + return 0; +} +#else /* CONFIG_DISCONTIGMEM */ +extern unsigned long zone_sizes_init(void); +#endif /* CONFIG_DISCONTIGMEM */ + +/*======================================================================* + * paging_init() : sets up the page tables + *======================================================================*/ +void __init paging_init(void) +{ +#ifdef CONFIG_MMU + int i; + pgd_t *pg_dir; + + /* We don't need kernel mapping as hardware support that. 
*/ + pg_dir = swapper_pg_dir; + + for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++) + pgd_val(pg_dir[i]) = 0; +#endif /* CONFIG_MMU */ + hole_pages = zone_sizes_init(); +} + +int __init reservedpages_count(void) +{ + int reservedpages, nid, i; + + reservedpages = 0; + for_each_online_node(nid) + for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++) + if (PageReserved(NODE_DATA(nid)->node_mem_map + i)) + reservedpages++; + + return reservedpages; +} + +/*======================================================================* + * mem_init() : + * orig : arch/sh/mm/init.c + *======================================================================*/ +void __init mem_init(void) +{ + int codesize, reservedpages, datasize, initsize; + int nid; +#ifndef CONFIG_MMU + extern unsigned long memory_end; +#endif + + num_physpages = 0; + for_each_online_node(nid) + num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1; + + num_physpages -= hole_pages; + +#ifndef CONFIG_DISCONTIGMEM + max_mapnr = num_physpages; +#endif /* CONFIG_DISCONTIGMEM */ + +#ifdef CONFIG_MMU + high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0))); +#else + high_memory = (void *)(memory_end & PAGE_MASK); +#endif /* CONFIG_MMU */ + + /* clear the zero-page */ + memset(empty_zero_page, 0, PAGE_SIZE); + + /* this will put all low memory onto the freelists */ + for_each_online_node(nid) + totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); + + reservedpages = reservedpages_count() - hole_pages; + codesize = (unsigned long) &_etext - (unsigned long)&_text; + datasize = (unsigned long) &_edata - (unsigned long)&_etext; + initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin; + + printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " + "%dk reserved, %dk data, %dk init)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + num_physpages << (PAGE_SHIFT-10), + codesize >> 10, + reservedpages << (PAGE_SHIFT-10), + datasize >> 10, + initsize >> 10); +} + +/*======================================================================* + * free_initmem() : + * orig : arch/sh/mm/init.c + *======================================================================*/ +void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long)(&__init_begin); + for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); + free_page(addr); + totalram_pages++; + } + printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \ + (int)(&__init_end - &__init_begin) >> 10); +} + +#ifdef CONFIG_BLK_DEV_INITRD +/*======================================================================* + * free_initrd_mem() : + * orig : arch/sh/mm/init.c + *======================================================================*/ +void free_initrd_mem(unsigned long start, unsigned long end) +{ + unsigned long p; + for (p = start; p < end; p += PAGE_SIZE) { + ClearPageReserved(virt_to_page(p)); + set_page_count(virt_to_page(p), 1); + free_page(p); + totalram_pages++; + } + printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); +} +#endif + diff --git a/arch/m32r/mm/ioremap-nommu.c b/arch/m32r/mm/ioremap-nommu.c new file mode 100644 index 0000000..2759f2d --- /dev/null +++ b/arch/m32r/mm/ioremap-nommu.c @@ -0,0 +1,52 @@ +/* + * linux/arch/m32r/mm/ioremap-nommu.c + * + * Copyright (c) 2001, 2002 Hiroyuki Kondo + * + * Taken from mips version. 
+ * (C) Copyright 1995 1996 Linus Torvalds + * (C) Copyright 2001 Ralf Baechle + */ + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ + +#include <linux/module.h> +#include <asm/addrspace.h> +#include <asm/byteorder.h> + +#include <linux/vmalloc.h> +#include <asm/io.h> +#include <asm/pgalloc.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ + +#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL)) + +void __iomem * +__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +{ + return (void *)phys_addr; +} + +#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) + +void iounmap(volatile void __iomem *addr) +{ +} + diff --git a/arch/m32r/mm/ioremap.c b/arch/m32r/mm/ioremap.c new file mode 100644 index 0000000..70c5905 --- /dev/null +++ b/arch/m32r/mm/ioremap.c @@ -0,0 +1,192 @@ +/* + * linux/arch/m32r/mm/ioremap.c + * + * Copyright (c) 2001, 2002 Hiroyuki Kondo + * + * Taken from mips version. + * (C) Copyright 1995 1996 Linus Torvalds + * (C) Copyright 2001 Ralf Baechle + */ + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ + +#include <linux/module.h> +#include <asm/addrspace.h> +#include <asm/byteorder.h> + +#include <linux/vmalloc.h> +#include <asm/io.h> +#include <asm/pgalloc.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +static inline void +remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + unsigned long pfn; + pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ + | _PAGE_WRITE | flags); + + address &= ~PMD_MASK; + end = address + size; + if (end > PMD_SIZE) + end = PMD_SIZE; + if (address >= end) + BUG(); + pfn = phys_addr >> PAGE_SHIFT; + do { + if (!pte_none(*pte)) { + printk("remap_area_pte: page already exists\n"); + BUG(); + } + set_pte(pte, pfn_pte(pfn, pgprot)); + address += PAGE_SIZE; + pfn++; + pte++; + } while (address && (address < end)); +} + +static inline int +remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + + address &= ~PGDIR_MASK; + end = address + size; + if (end > PGDIR_SIZE) + end = PGDIR_SIZE; + phys_addr -= address; + if (address >= end) + BUG(); + do { + pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); + if (!pte) + return -ENOMEM; + remap_area_pte(pte, address, end - address, address + phys_addr, flags); + address = (address + PMD_SIZE) & PMD_MASK; + pmd++; + } while (address && (address < end)); + return 0; +} + +static int +remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + int error; + pgd_t * dir; + unsigned long end = address + size; + + phys_addr -= address; + dir = pgd_offset(&init_mm, address); + flush_cache_all(); + if (address >= end) + BUG(); + 
spin_lock(&init_mm.page_table_lock); + do { + pmd_t *pmd; + pmd = pmd_alloc(&init_mm, dir, address); + error = -ENOMEM; + if (!pmd) + break; + if (remap_area_pmd(pmd, address, end - address, + phys_addr + address, flags)) + break; + error = 0; + address = (address + PGDIR_SIZE) & PGDIR_MASK; + dir++; + } while (address && (address < end)); + spin_unlock(&init_mm.page_table_lock); + flush_tlb_all(); + return error; +} + +/* + * Generic mapping function (not visible outside): + */ + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ + +#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL)) + +void __iomem * +__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +{ + void __iomem * addr; + struct vm_struct * area; + unsigned long offset, last_addr; + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Map objects in the low 512mb of address space using KSEG1, otherwise + * map using page tables. + */ + if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1)) + return (void *) KSEG1ADDR(phys_addr); + + /* + * Don't allow anybody to remap normal RAM that we're using.. + */ + if (phys_addr < virt_to_phys(high_memory)) { + char *t_addr, *t_end; + struct page *page; + + t_addr = __va(phys_addr); + t_end = t_addr + (size - 1); + + for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) + if(!PageReserved(page)) + return NULL; + } + + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - phys_addr; + + /* + * Ok, go for it.. + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + area->phys_addr = phys_addr; + addr = (void __iomem *) area->addr; + if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) { + vunmap((void __force *) addr); + return NULL; + } + + return (void __iomem *) (offset + (char __iomem *)addr); +} + +#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) + +void iounmap(volatile void __iomem *addr) +{ + if (!IS_KSEG1(addr)) + vfree((void *) (PAGE_MASK & (unsigned long) addr)); +} + diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S new file mode 100644 index 0000000..0c28f11 --- /dev/null +++ b/arch/m32r/mm/mmu.S @@ -0,0 +1,350 @@ +/* + * linux/arch/m32r/mm/mmu.S + * + * Copyright (C) 2001 by Hiroyuki Kondo + */ + +/* $Id: mmu.S,v 1.15 2004/03/16 02:56:27 takata Exp $ */ + +#include <linux/config.h> /* CONFIG_MMU */ +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/smp.h> + + .text +#ifdef CONFIG_MMU + +#include <asm/mmu_context.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/m32r.h> + +/* + * TLB Miss Exception handler + */ + .balign 16 +ENTRY(tme_handler) + .global tlb_entry_i_dat + .global tlb_entry_d_dat + + SWITCH_TO_KERNEL_STACK + +#if defined(CONFIG_ISA_M32R2) + st r0, @-sp + st r1, @-sp + st r2, @-sp + st r3, @-sp + + seth r3, #high(MMU_REG_BASE) + ld r1, @(MESTS_offset, r3) ; r1: status (MESTS reg.) + ld r0, @(MDEVP_offset, r3) ; r0: PFN + ASID (MDEVP reg.) + st r1, @(MESTS_offset, r3) ; clear status (MESTS reg.) 
+ and3 r1, r1, #(MESTS_IT) + bnez r1, 1f ; instruction TLB miss? + +;; data TLB miss +;; input +;; r0: PFN + ASID (MDEVP reg.) +;; r1 - r3: free +;; output +;; r0: PFN + ASID +;; r1: TLB entry base address +;; r2: &tlb_entry_{i|d}_dat +;; r3: free + +#ifndef CONFIG_SMP + seth r2, #high(tlb_entry_d_dat) + or3 r2, r2, #low(tlb_entry_d_dat) +#else /* CONFIG_SMP */ + ldi r1, #-8192 + seth r2, #high(tlb_entry_d_dat) + or3 r2, r2, #low(tlb_entry_d_dat) + and r1, sp + ld r1, @(16, r1) ; current_thread_info->cpu + slli r1, #2 + add r2, r1 +#endif /* !CONFIG_SMP */ + seth r1, #high(DTLB_BASE) + or3 r1, r1, #low(DTLB_BASE) + bra 2f + + .balign 16 + .fillinsn +1: +;; instrucntion TLB miss +;; input +;; r0: MDEVP reg. (included ASID) +;; r1 - r3: free +;; output +;; r0: PFN + ASID +;; r1: TLB entry base address +;; r2: &tlb_entry_{i|d}_dat +;; r3: free + ldi r3, #-4096 + and3 r0, r0, #(MMU_CONTEXT_ASID_MASK) + mvfc r1, bpc + and r1, r3 + or r0, r1 ; r0: PFN + ASID +#ifndef CONFIG_SMP + seth r2, #high(tlb_entry_i_dat) + or3 r2, r2, #low(tlb_entry_i_dat) +#else /* CONFIG_SMP */ + ldi r1, #-8192 + seth r2, #high(tlb_entry_i_dat) + or3 r2, r2, #low(tlb_entry_i_dat) + and r1, sp + ld r1, @(16, r1) ; current_thread_info->cpu + slli r1, #2 + add r2, r1 +#endif /* !CONFIG_SMP */ + seth r1, #high(ITLB_BASE) + or3 r1, r1, #low(ITLB_BASE) + + .fillinsn +2: +;; select TLB entry +;; input +;; r0: PFN + ASID +;; r1: TLB entry base address +;; r2: &tlb_entry_{i|d}_dat +;; r3: free +;; output +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2, r3: free +#ifdef CONFIG_ISA_DUAL_ISSUE + ld r3, @r2 || srli r1, #3 +#else + ld r3, @r2 + srli r1, #3 +#endif + add r1, r3 + ; tlb_entry_{d|i}_dat++; + addi r3, #1 + and3 r3, r3, #(NR_TLB_ENTRIES - 1) +#ifdef CONFIG_ISA_DUAL_ISSUE + st r3, @r2 || slli r1, #3 +#else + st r3, @r2 + slli r1, #3 +#endif + +;; load pte +;; input +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2, r3: free +;; output +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2: pte_data +;; r3: free + ; pgd = *(unsigned long *)MPTB; + ld24 r2, #(-MPTB - 1) + srl3 r3, r0, #22 +#ifdef CONFIG_ISA_DUAL_ISSUE + not r2, r2 || slli r3, #2 ; r3: pgd offset +#else + not r2, r2 + slli r3, #2 +#endif + ld r2, @r2 ; r2: pgd base addr (MPTB reg.) + or r3, r2 ; r3: pmd addr + + ; pmd = pmd_offset(pgd, address); + ld r3, @r3 ; r3: pmd data + ldi r2, #-4096 + beqz r3, 3f ; pmd_none(*pmd) ? + + ; pte = pte_offset(pmd, address); + and r2, r3 ; r2: pte base addr + srl3 r3, r0, #10 + and3 r3, r3, #0xffc ; r3: pte offset + or r3, r2 + seth r2, #0x8000 + or r3, r2 ; r3: pte addr + + ; pte_data = (unsigned long)pte_val(*pte); + ld r2, @r3 ; r2: pte data + or3 r2, r2, #2 ; _PAGE_PRESENT(=2) + + .fillinsn +5: +;; set tlb +;; input +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2: pte_data +;; r3: free + st r0, @r1 ; set_tlb_tag(entry++, address); + st r2, @+r1 ; set_tlb_data(entry, pte_data); + + .fillinsn +6: + ld r3, @sp+ + ld r2, @sp+ + ld r1, @sp+ + ld r0, @sp+ + rte + + .fillinsn +3: +;; error +;; input +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2, r3: free +;; output +;; r0: PFN + ASID +;; r1: TLB entry address +;; r2: pte_data +;; r3: free +#ifdef CONFIG_ISA_DUAL_ISSUE + bra 5b || ldi r2, #2 +#else + ldi r2, #2 ; r2: pte_data = 0 | _PAGE_PRESENT(=2) + bra 5b +#endif + +#elif defined (CONFIG_ISA_M32R) + + st sp, @-sp + st r0, @-sp + st r1, @-sp + st r2, @-sp + st r3, @-sp + st r4, @-sp + + seth r3, #high(MMU_REG_BASE) + ld r0, @(MDEVA_offset,r3) ; r0: address (MDEVA reg.) 
+ mvfc r2, bpc ; r2: bpc + ld r1, @(MESTS_offset,r3) ; r1: status (MESTS reg.) + st r1, @(MESTS_offset,r3) ; clear status (MESTS reg.) + and3 r1, r1, #(MESTS_IT) + beqz r1, 1f ; data TLB miss? + +;; instrucntion TLB miss + mv r0, r2 ; address = bpc; + ; entry = (unsigned long *)ITLB_BASE+tlb_entry_i*2; + seth r3, #shigh(tlb_entry_i_dat) + ld r4, @(low(tlb_entry_i_dat),r3) + sll3 r2, r4, #3 + seth r1, #high(ITLB_BASE) + or3 r1, r1, #low(ITLB_BASE) + add r2, r1 ; r2: entry + addi r4, #1 ; tlb_entry_i++; + and3 r4, r4, #(NR_TLB_ENTRIES-1) + st r4, @(low(tlb_entry_i_dat),r3) + bra 2f + .fillinsn +1: +;; data TLB miss + ; entry = (unsigned long *)DTLB_BASE+tlb_entry_d*2; + seth r3, #shigh(tlb_entry_d_dat) + ld r4, @(low(tlb_entry_d_dat),r3) + sll3 r2, r4, #3 + seth r1, #high(DTLB_BASE) + or3 r1, r1, #low(DTLB_BASE) + add r2, r1 ; r2: entry + addi r4, #1 ; tlb_entry_d++; + and3 r4, r4, #(NR_TLB_ENTRIES-1) + st r4, @(low(tlb_entry_d_dat),r3) + .fillinsn +2: +;; load pte +; r0: address, r2: entry +; r1,r3,r4: (free) + ; pgd = *(unsigned long *)MPTB; + ld24 r1, #(-MPTB-1) + not r1, r1 + ld r1, @r1 + srl3 r4, r0, #22 + sll3 r3, r4, #2 + add r3, r1 ; r3: pgd + ; pmd = pmd_offset(pgd, address); + ld r1, @r3 ; r1: pmd + beqz r1, 3f ; pmd_none(*pmd) ? +; + and3 r1, r1, #0xeff + ldi r4, #611 ; _KERNPG_TABLE(=611) + beq r1, r4, 4f ; !pmd_bad(*pmd) ? + .fillinsn +3: + ldi r1, #0 ; r1: pte_data = 0 + bra 5f + .fillinsn +4: + ; pte = pte_offset(pmd, address); + ld r4, @r3 ; r4: pte + ldi r3, #-4096 + and r4, r3 + srl3 r3, r0, #10 + and3 r3, r3, #0xffc + add r4, r3 + seth r3, #0x8000 + add r4, r3 ; r4: pte + ; pte_data = (unsigned long)pte_val(*pte); + ld r1, @r4 ; r1: pte_data + .fillinsn + +;; set tlb +; r0: address, r1: pte_data, r2: entry +; r3,r4: (free) +5: + ldi r3, #-4096 ; set_tlb_tag(entry++, address); + and r3, r0 + seth r4, #shigh(MASID) + ld r4, @(low(MASID),r4) ; r4: MASID + and3 r4, r4, #(MMU_CONTEXT_ASID_MASK) + or r3, r4 + st r3, @r2 + or3 r4, r1, #2 ; _PAGE_PRESENT(=2) + st r4, @(4,r2) ; set_tlb_data(entry, pte_data); + + ld r4, @sp+ + ld r3, @sp+ + ld r2, @sp+ + ld r1, @sp+ + ld r0, @sp+ + ld sp, @sp+ + rte + +#else +#error unknown isa configuration +#endif + +ENTRY(init_tlb) +;; Set MMU Register + seth r0, #high(MMU_REG_BASE) ; Set MMU_REG_BASE higher + or3 r0, r0, #low(MMU_REG_BASE) ; Set MMU_REG_BASE lower + ldi r1, #0 + st r1, @(MPSZ_offset,r0) ; Set MPSZ Reg(Page size 4KB:0 16KB:1 64KB:2) + ldi r1, #0 + st r1, @(MASID_offset,r0) ; Set ASID Zero + +;; Set TLB + seth r0, #high(ITLB_BASE) ; Set ITLB_BASE higher + or3 r0, r0, #low(ITLB_BASE) ; Set ITLB_BASE lower + seth r1, #high(DTLB_BASE) ; Set DTLB_BASE higher + or3 r1, r1, #low(DTLB_BASE) ; Set DTLB_BASE lower + ldi r2, #0 + ldi r3, #NR_TLB_ENTRIES + addi r0, #-4 + addi r1, #-4 +clear_tlb: + st r2, @+r0 ; VPA <- 0 + st r2, @+r0 ; PPA <- 0 + st r2, @+r1 ; VPA <- 0 + st r2, @+r1 ; PPA <- 0 + addi r3, #-1 + bnez r3, clear_tlb +;; + jmp r14 + +ENTRY(m32r_itlb_entrys) +ENTRY(m32r_otlb_entrys) + +#endif /* CONFIG_MMU */ + +.end + diff --git a/arch/m32r/mm/page.S b/arch/m32r/mm/page.S new file mode 100644 index 0000000..a2e9367 --- /dev/null +++ b/arch/m32r/mm/page.S @@ -0,0 +1,82 @@ +/* + * linux/arch/m32r/mm/page.S + * + * Clear/Copy page with CPU + * + * Copyright (C) 2004 The Free Software Initiative of Japan + * + * Written by Niibe Yutaka + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + */ + .text + .global copy_page + /* + * copy_page (to, from) + * + * PAGE_SIZE = 4096-byte + * Cache line = 16-byte + * 16 * 256 + */ + .align 4 +copy_page: + ldi r2, #255 + ld r3, @r0 /* cache line allocate */ + ld r4, @r1+ + ld r5, @r1+ + ld r6, @r1+ + ld r7, @r1+ + .fillinsn +0: + st r4, @r0 + st r5, @+r0 + st r6, @+r0 + st r7, @+r0 + ld r4, @r1+ + addi r0, #4 + ld r5, @r1+ + ld r6, @r1+ + ld r7, @r1+ + ld r3, @r0 /* cache line allocate */ + addi r2, #-1 + bnez r2, 0b + + st r4, @r0 + st r5, @+r0 + st r6, @+r0 + st r7, @+r0 + jmp r14 + + .text + .global clear_page + /* + * clear_page (to) + * + * PAGE_SIZE = 4096-byte + * Cache line = 16-byte + * 16 * 256 + */ + .align 4 +clear_page: + ldi r2, #255 + ldi r4, #0 + ld r3, @r0 /* cache line allocate */ + .fillinsn +0: + st r4, @r0 + st r4, @+r0 + st r4, @+r0 + st r4, @+r0 + addi r0, #4 + ld r3, @r0 /* cache line allocate */ + addi r2, #-1 + bnez r2, 0b + + st r4, @r0 + st r4, @+r0 + st r4, @+r0 + st r4, @+r0 + jmp r14 |