Diffstat (limited to 'arch/metag/mm')
-rw-r--r--  arch/metag/mm/Kconfig         147
-rw-r--r--  arch/metag/mm/Makefile         20
-rw-r--r--  arch/metag/mm/cache.c         521
-rw-r--r--  arch/metag/mm/extable.c        15
-rw-r--r--  arch/metag/mm/fault.c         247
-rw-r--r--  arch/metag/mm/highmem.c       122
-rw-r--r--  arch/metag/mm/hugetlbpage.c   251
-rw-r--r--  arch/metag/mm/init.c          408
-rw-r--r--  arch/metag/mm/ioremap.c        90
-rw-r--r--  arch/metag/mm/l2cache.c       193
-rw-r--r--  arch/metag/mm/maccess.c        69
-rw-r--r--  arch/metag/mm/mmu-meta1.c     157
-rw-r--r--  arch/metag/mm/mmu-meta2.c     208
-rw-r--r--  arch/metag/mm/numa.c           82
14 files changed, 0 insertions, 2530 deletions
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
deleted file mode 100644
index 9d4b2c6..0000000
--- a/arch/metag/mm/Kconfig
+++ /dev/null
@@ -1,147 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menu "Memory management options"
-
-config PAGE_OFFSET
- hex "Kernel page offset address"
- default "0x40000000"
- help
- This option allows you to set the virtual address at which the
- kernel will be mapped.
-
-config KERNEL_4M_PAGES
- bool "Map kernel with 4MB pages"
- depends on METAG_META21_MMU
- default y
- help
- Map the kernel with large pages to reduce TLB pressure.
-
-choice
- prompt "User page size"
- default PAGE_SIZE_4K
-
-config PAGE_SIZE_4K
- bool "4kB"
- help
- This is the default page size used by all Meta cores.
-
-config PAGE_SIZE_8K
- bool "8kB"
- depends on METAG_META21_MMU
- help
- This enables 8kB pages as supported by Meta 2.x and later MMUs.
-
-config PAGE_SIZE_16K
- bool "16kB"
- depends on METAG_META21_MMU
- help
- This enables 16kB pages as supported by Meta 2.x and later MMUs.
-
-endchoice
-
-config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
- select ARCH_WANT_NUMA_VARIABLE_LOCALITY
- help
- Some Meta systems have MMU-mappable on-chip memories with
- lower latencies than main memory. This enables support for
- these blocks by binding them to nodes and allowing
- memory policies to be used for prioritizing and controlling
- allocation behaviour.
-
-config FORCE_MAX_ZONEORDER
- int "Maximum zone order"
- range 10 32
- default "10"
- help
- The kernel memory allocator divides physically contiguous memory
- blocks into "zones", where each zone is a power of two number of
- pages. This option selects the largest power of two that the kernel
- keeps in the memory allocator. If you need to allocate very large
- blocks of physically contiguous memory, then you may need to
- increase this value.
-
- This config option is actually maximum order plus one. For example,
- a value of 11 means that the largest free memory block is 2^10 pages.
-
- The page size is not necessarily 4KB. Keep this in mind
- when choosing a value for this option.
-
-config METAG_L2C
- bool "Level 2 Cache Support"
- depends on METAG_META21
- help
- Press y here to enable support for the Meta Level 2 (L2) cache. This
- will enable the cache at start up if it hasn't already been enabled
- by the bootloader.
-
- If the bootloader enables the L2 you must press y here to ensure the
- kernel takes the appropriate actions to keep the cache coherent.
-
-config NODES_SHIFT
- int
- default "1"
- depends on NEED_MULTIPLE_NODES
-
-config ARCH_FLATMEM_ENABLE
- def_bool y
- depends on !NUMA
-
-config ARCH_SPARSEMEM_ENABLE
- def_bool y
- select SPARSEMEM_STATIC
-
-config ARCH_SPARSEMEM_DEFAULT
- def_bool y
-
-config ARCH_SELECT_MEMORY_MODEL
- def_bool y
-
-config SYS_SUPPORTS_HUGETLBFS
- def_bool y
- depends on METAG_META21_MMU
-
-choice
- prompt "HugeTLB page size"
- depends on METAG_META21_MMU && HUGETLB_PAGE
- default HUGETLB_PAGE_SIZE_1M
-
-config HUGETLB_PAGE_SIZE_8K
- bool "8kB"
- depends on PAGE_SIZE_4K
-
-config HUGETLB_PAGE_SIZE_16K
- bool "16kB"
- depends on PAGE_SIZE_4K || PAGE_SIZE_8K
-
-config HUGETLB_PAGE_SIZE_32K
- bool "32kB"
-
-config HUGETLB_PAGE_SIZE_64K
- bool "64kB"
-
-config HUGETLB_PAGE_SIZE_128K
- bool "128kB"
-
-config HUGETLB_PAGE_SIZE_256K
- bool "256kB"
-
-config HUGETLB_PAGE_SIZE_512K
- bool "512kB"
-
-config HUGETLB_PAGE_SIZE_1M
- bool "1MB"
-
-config HUGETLB_PAGE_SIZE_2M
- bool "2MB"
-
-config HUGETLB_PAGE_SIZE_4M
- bool "4MB"
-
-endchoice
-
-config METAG_COREMEM
- bool
- default y if SUSPEND
-
-source "mm/Kconfig"
-
-endmenu
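For readers unfamiliar with the FORCE_MAX_ZONEORDER semantics described in the help text above, here is a minimal host-side sketch; the order and page-shift values are assumptions mirroring the defaults above, not taken from any kernel header:

#include <stdio.h>

/* Assumed values mirroring the Kconfig defaults above. */
#define FORCE_MAX_ZONEORDER 10   /* "maximum order plus one" */
#define PAGE_SHIFT          12   /* 4kB pages (PAGE_SIZE_4K) */

int main(void)
{
    /* Largest free block the buddy allocator keeps: 2^(order-1) pages. */
    unsigned long max_pages = 1UL << (FORCE_MAX_ZONEORDER - 1);
    unsigned long max_bytes = max_pages << PAGE_SHIFT;

    printf("largest contiguous block: %lu pages (%lu kB)\n",
           max_pages, max_bytes >> 10);   /* 512 pages, 2048 kB */
    return 0;
}

With the defaults above (order 10, 4kB pages) the largest buddy block works out to 512 pages, i.e. 2MB.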
diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile
deleted file mode 100644
index 0c7c91b..0000000
--- a/arch/metag/mm/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux Meta-specific parts of the memory manager.
-#
-
-obj-y += cache.o
-obj-y += extable.o
-obj-y += fault.o
-obj-y += init.o
-obj-y += ioremap.o
-obj-y += maccess.o
-
-mmu-y := mmu-meta1.o
-mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o
-obj-y += $(mmu-y)
-
-obj-$(CONFIG_HIGHMEM) += highmem.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_METAG_L2C) += l2cache.o
-obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
deleted file mode 100644
index a622852..0000000
--- a/arch/metag/mm/cache.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * arch/metag/mm/cache.c
- *
- * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * Cache control code
- */
-
-#include <linux/export.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-#include <asm/core_reg.h>
-#include <asm/global_lock.h>
-#include <asm/metag_isa.h>
-#include <asm/metag_mem.h>
-#include <asm/metag_regs.h>
-
-#define DEFAULT_CACHE_WAYS_LOG2 2
-
-/*
- * Size of a set in the caches. Initialised for default 16K stride, adjusted
- * according to values passed through TBI global heap segment via LDLK (on ATP)
- * or config registers (on HTP/MTP)
- */
-static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2;
-static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2;
-/*
- * The number of sets in the caches. Initialised for HTP/ATP, adjusted
- * according to NOMMU setting in config registers
- */
-static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
-static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
-
-#ifndef CONFIG_METAG_META12
-/**
- * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache
- */
-static volatile u32 lnkget_testdata[16] __initdata __aligned(64);
-
-#define LNKGET_CONSTANT 0xdeadbeef
-
-static void __init metag_lnkget_probe(void)
-{
- int temp;
- long flags;
-
- /*
- * It's conceivable the user has configured a globally coherent cache
- * shared with non-Linux hardware threads, so use LOCK2 to prevent them
- * from executing and causing cache eviction during the test.
- */
- __global_lock2(flags);
-
- /* read a value to bring it into the cache */
- (void)lnkget_testdata[0];
- lnkget_testdata[0] = 0;
-
- /* lnkget/lnkset it to modify it */
- asm volatile(
- "1: LNKGETD %0, [%1]\n"
- " LNKSETD [%1], %2\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT)
- : "cc");
-
- /* re-read it to see if the cached value changed */
- temp = lnkget_testdata[0];
-
- __global_unlock2(flags);
-
- /* flush the cache line to fix any incoherency */
- __builtin_dcache_flush((void *)&lnkget_testdata[0]);
-
-#if defined(CONFIG_METAG_LNKGET_AROUND_CACHE)
- /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */
- if (temp == LNKGET_CONSTANT)
- pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n");
-#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary
- * because the kernel is configured to use LNKGET/SET for atomicity
- */
- WARN(temp != LNKGET_CONSTANT,
- "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
- "Expect kernel failure as it's used for atomicity primitives\n");
-#elif defined(CONFIG_SMP)
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the
- * gateway page won't flush and userland could break.
- */
- WARN(temp != LNKGET_CONSTANT,
- "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
- "Expect userland failure as it's used for user gateway page\n");
-#else
- /*
- * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it
- * doesn't actually matter as it doesn't have any effect on !SMP &&
- * !ATOMICITY_LNKGET.
- */
- if (temp != LNKGET_CONSTANT)
- pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n");
-#endif
-}
-#endif /* !CONFIG_METAG_META12 */
-
-/**
- * metag_cache_probe() - Probe L1 cache configuration.
- *
- * Probe the L1 cache configuration to aid the L1 physical cache flushing
- * functions.
- */
-void __init metag_cache_probe(void)
-{
-#ifndef CONFIG_METAG_META12
- int coreid = metag_in32(METAC_CORE_ID);
- int config = metag_in32(METAC_CORE_CONFIG2);
- int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
-
- if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
- cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
- icache_sets_log2 = 1;
- dcache_sets_log2 = 1;
- }
-
- /* For normal size caches, the smallest size is 4KB.
- For small caches, the smallest size is 64B */
- icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
- ? 6 : 12;
- icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
- >> METAC_CORE_C2ICSZ_S;
- icache_set_shift -= icache_sets_log2;
-
- dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
- ? 6 : 12;
- dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
- >> METAC_CORECFG2_DCSZ_S;
- dcache_set_shift -= dcache_sets_log2;
-
- metag_lnkget_probe();
-#else
- /* Extract cache sizes from global heap segment */
- unsigned long val, u;
- int width, shift, addend;
- PTBISEG seg;
-
- seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
- TBID_SEGSCOPE_GLOBAL,
- TBID_SEGTYPE_HEAP));
- if (seg != NULL) {
- val = seg->Data[1];
-
- /* Work out width of I-cache size bit-field */
- u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
- >> METAG_TBI_ICACHE_SIZE_S;
- width = 0;
- while (u & 1) {
- width++;
- u >>= 1;
- }
- /* Extract sign-extended size addend value */
- shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
- addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
- << shift)
- >> (shift + METAG_TBI_ICACHE_SIZE_S);
- /* Now calculate I-cache set size */
- icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2)
- + addend;
-
- /* Similarly for D-cache */
- u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
- >> METAG_TBI_DCACHE_SIZE_S;
- width = 0;
- while (u & 1) {
- width++;
- u >>= 1;
- }
- shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
- addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
- << shift)
- >> (shift + METAG_TBI_DCACHE_SIZE_S);
- dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
- - DEFAULT_CACHE_WAYS_LOG2)
- + addend;
- }
-#endif
-}
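The META1 branch above extracts a sign-extended cache-size addend by shifting the bitfield up to bit 31 and arithmetic-shifting it back down. A standalone sketch of that idiom, with made-up field constants (not the real METAG_TBI_* values) and relying on the usual arithmetic behaviour of signed right shifts:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout; not the real METAG_TBI_* values. */
#define SIZE_S    4                    /* field starts at bit 4 */
#define SIZE_BITS (0xfu << SIZE_S)     /* 4-bit wide field      */

static int extract_signed(uint32_t val)
{
    int width = 4;                     /* bits in the field */
    int shift = 32 - (SIZE_S + width);

    /* Shift the field up to bit 31, then arithmetic-shift back
     * down so its top bit is replicated as a sign bit. */
    return (int32_t)((val & SIZE_BITS) << shift) >> (shift + SIZE_S);
}

int main(void)
{
    printf("%d\n", extract_signed(0x3u << SIZE_S));  /* prints  3 */
    printf("%d\n", extract_signed(0xeu << SIZE_S));  /* prints -2 */
    return 0;
}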
-
-static void metag_phys_data_cache_flush(const void *start)
-{
- unsigned long flush0, flush1, flush2, flush3;
- int loops, step;
- int thread;
- int part, offset;
- int set_shift;
-
- /* Use a sequence of writes to flush the cache region requested */
- thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
- >> TXENABLE_THREAD_S;
-
- /* Cache is broken into sets which lie in contiguous RAMs */
- set_shift = dcache_set_shift;
-
- /* Move to the base of the physical cache flush region */
- flush0 = LINSYSCFLUSH_DCACHE_LINE;
- step = 64;
-
- /* Get partition data for this thread */
- part = metag_in32(SYSC_DCPART0 +
- (SYSC_xCPARTn_STRIDE * thread));
-
- if ((int)start < 0)
- /* Access Global vs Local partition */
- part >>= SYSC_xCPARTG_AND_S
- - SYSC_xCPARTL_AND_S;
-
- /* Extract offset and move SetOff */
- offset = (part & SYSC_xCPARTL_OR_BITS)
- >> SYSC_xCPARTL_OR_S;
- flush0 += (offset << (set_shift - 4));
-
- /* Shrink size */
- part = (part & SYSC_xCPARTL_AND_BITS)
- >> SYSC_xCPARTL_AND_S;
- loops = ((part + 1) << (set_shift - 4));
-
- /* Reduce loops by step of cache line size */
- loops /= step;
-
- flush1 = flush0 + (1 << set_shift);
- flush2 = flush0 + (2 << set_shift);
- flush3 = flush0 + (3 << set_shift);
-
- if (dcache_sets_log2 == 1) {
- flush2 = flush1;
- flush3 = flush1 + step;
- flush1 = flush0 + step;
- step <<= 1;
- loops >>= 1;
- }
-
- /* Clear loops ways in cache */
- while (loops-- != 0) {
- /* Clear the ways. */
-#if 0
- /*
- * GCC doesn't generate very good code for this so we
- * provide inline assembly instead.
- */
- metag_out8(0, flush0);
- metag_out8(0, flush1);
- metag_out8(0, flush2);
- metag_out8(0, flush3);
-
- flush0 += step;
- flush1 += step;
- flush2 += step;
- flush3 += step;
-#else
- asm volatile (
- "SETB\t[%0+%4++],%5\n"
- "SETB\t[%1+%4++],%5\n"
- "SETB\t[%2+%4++],%5\n"
- "SETB\t[%3+%4++],%5\n"
- : "+e" (flush0),
- "+e" (flush1),
- "+e" (flush2),
- "+e" (flush3)
- : "e" (step), "a" (0));
-#endif
- }
-}
-
-void metag_data_cache_flush_all(const void *start)
-{
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
-		/* No need to flush the data cache as it's not actually enabled */
- return;
-
- metag_phys_data_cache_flush(start);
-}
-
-void metag_data_cache_flush(const void *start, int bytes)
-{
- unsigned long flush0;
- int loops, step;
-
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
-		/* No need to flush the data cache as it's not actually enabled */
- return;
-
- if (bytes >= 4096) {
- metag_phys_data_cache_flush(start);
- return;
- }
-
- /* Use linear cache flush mechanism on META IP */
- flush0 = (int)start;
- loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
- (DCACHE_LINE_BYTES - 1);
- loops >>= DCACHE_LINE_S;
-
-#define PRIM_FLUSH(addr, offset) do { \
- int __addr = ((int) (addr)) + ((offset) * 64); \
- __builtin_dcache_flush((void *)(__addr)); \
- } while (0)
-
-#define LOOP_INC (4*64)
-
- do {
- /* By default stop */
- step = 0;
-
- switch (loops) {
- /* Drop Thru Cases! */
- default:
- PRIM_FLUSH(flush0, 3);
- loops -= 4;
- step = 1;
- case 3:
- PRIM_FLUSH(flush0, 2);
- case 2:
- PRIM_FLUSH(flush0, 1);
- case 1:
- PRIM_FLUSH(flush0, 0);
- flush0 += LOOP_INC;
- case 0:
- break;
- }
- } while (step);
-}
-EXPORT_SYMBOL(metag_data_cache_flush);
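The flush loop above is a Duff's-device-style unrolling: the switch deliberately falls through so a trailing remainder of one to three cache lines is handled by the same code as the four-at-a-time fast path. A self-contained model of the control flow, with flush_line() standing in for the PRIM_FLUSH() primitive:

#include <stdio.h>

/* Stand-in for the PRIM_FLUSH() cache-line primitive. */
static void flush_line(int n) { printf("flush line %d\n", n); }

/* Process 'loops' lines four at a time, with the same deliberate
 * fall-through switch as the flush loop above handling the
 * trailing remainder of 1-3 lines. */
static void unrolled_flush(int loops)
{
    int base = 0, step;

    do {
        step = 0;                /* by default, stop */
        switch (loops) {
        default:                 /* four or more left */
            flush_line(base + 3);
            loops -= 4;
            step = 1;            /* keep going */
            /* fall through */
        case 3:
            flush_line(base + 2);
            /* fall through */
        case 2:
            flush_line(base + 1);
            /* fall through */
        case 1:
            flush_line(base + 0);
            base += 4;
            /* fall through */
        case 0:
            break;
        }
    } while (step);
}

int main(void)
{
    unrolled_flush(6);           /* flushes lines 3,2,1,0 then 5,4 */
    return 0;
}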
-
-static void metag_phys_code_cache_flush(const void *start, int bytes)
-{
- unsigned long flush0, flush1, flush2, flush3, end_set;
- int loops, step;
- int thread;
- int set_shift, set_size;
- int part, offset;
-
- /* Use a sequence of writes to flush the cache region requested */
- thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
- >> TXENABLE_THREAD_S;
- set_shift = icache_set_shift;
-
- /* Move to the base of the physical cache flush region */
- flush0 = LINSYSCFLUSH_ICACHE_LINE;
- step = 64;
-
- /* Get partition code for this thread */
- part = metag_in32(SYSC_ICPART0 +
- (SYSC_xCPARTn_STRIDE * thread));
-
- if ((int)start < 0)
- /* Access Global vs Local partition */
- part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
-
- /* Extract offset and move SetOff */
- offset = (part & SYSC_xCPARTL_OR_BITS)
- >> SYSC_xCPARTL_OR_S;
- flush0 += (offset << (set_shift - 4));
-
- /* Shrink size */
- part = (part & SYSC_xCPARTL_AND_BITS)
- >> SYSC_xCPARTL_AND_S;
- loops = ((part + 1) << (set_shift - 4));
-
- /* Where does the Set end? */
- end_set = flush0 + loops;
- set_size = loops;
-
-#ifdef CONFIG_METAG_META12
- if ((bytes < 4096) && (bytes < loops)) {
- /* Unreachable on HTP/MTP */
-		/* Only target the sets that could be relevant */
- flush0 += (loops - step) & ((int) start);
- loops = (((int) start) & (step-1)) + bytes + step - 1;
- }
-#endif
-
- /* Reduce loops by step of cache line size */
- loops /= step;
-
- flush1 = flush0 + (1<<set_shift);
- flush2 = flush0 + (2<<set_shift);
- flush3 = flush0 + (3<<set_shift);
-
- if (icache_sets_log2 == 1) {
- flush2 = flush1;
- flush3 = flush1 + step;
- flush1 = flush0 + step;
-#if 0
- /* flush0 will stop one line early in this case
- * (flush1 will do the final line).
- * However we don't correct end_set here at the moment
- * because it will never wrap on HTP/MTP
- */
- end_set -= step;
-#endif
- step <<= 1;
- loops >>= 1;
- }
-
- /* Clear loops ways in cache */
- while (loops-- != 0) {
-#if 0
- /*
- * GCC doesn't generate very good code for this so we
- * provide inline assembly instead.
- */
- /* Clear the ways */
- metag_out8(0, flush0);
- metag_out8(0, flush1);
- metag_out8(0, flush2);
- metag_out8(0, flush3);
-
- flush0 += step;
- flush1 += step;
- flush2 += step;
- flush3 += step;
-#else
- asm volatile (
- "SETB\t[%0+%4++],%5\n"
- "SETB\t[%1+%4++],%5\n"
- "SETB\t[%2+%4++],%5\n"
- "SETB\t[%3+%4++],%5\n"
- : "+e" (flush0),
- "+e" (flush1),
- "+e" (flush2),
- "+e" (flush3)
- : "e" (step), "a" (0));
-#endif
-
- if (flush0 == end_set) {
- /* Wrap within Set 0 */
- flush0 -= set_size;
- flush1 -= set_size;
- flush2 -= set_size;
- flush3 -= set_size;
- }
- }
-}
-
-void metag_code_cache_flush_all(const void *start)
-{
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
-		/* No need to flush the code cache as it's not actually enabled */
- return;
-
- metag_phys_code_cache_flush(start, 4096);
-}
-EXPORT_SYMBOL(metag_code_cache_flush_all);
-
-void metag_code_cache_flush(const void *start, int bytes)
-{
-#ifndef CONFIG_METAG_META12
- void *flush;
- int loops, step;
-#endif /* !CONFIG_METAG_META12 */
-
- if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
-		/* No need to flush the code cache as it's not actually enabled */
- return;
-
-#ifdef CONFIG_METAG_META12
- /* CACHEWD isn't available on Meta1, so always do full cache flush */
- metag_phys_code_cache_flush(start, bytes);
-
-#else /* CONFIG_METAG_META12 */
- /* If large size do full physical cache flush */
- if (bytes >= 4096) {
- metag_phys_code_cache_flush(start, bytes);
- return;
- }
-
- /* Use linear cache flush mechanism on META IP */
- flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
- loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
- (ICACHE_LINE_BYTES-1);
- loops >>= ICACHE_LINE_S;
-
-#define PRIM_IFLUSH(addr, offset) \
- __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
-
-#define LOOP_INC (4*64)
-
- do {
- /* By default stop */
- step = 0;
-
- switch (loops) {
- /* Drop Thru Cases! */
- default:
- PRIM_IFLUSH(flush, 3);
- loops -= 4;
- step = 1;
- case 3:
- PRIM_IFLUSH(flush, 2);
- case 2:
- PRIM_IFLUSH(flush, 1);
- case 1:
- PRIM_IFLUSH(flush, 0);
- flush += LOOP_INC;
- case 0:
- break;
- }
- } while (step);
-#endif /* !CONFIG_METAG_META12 */
-}
-EXPORT_SYMBOL(metag_code_cache_flush);
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
deleted file mode 100644
index 9b92d3a..0000000
--- a/arch/metag/mm/extable.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
- unsigned long pc = instruction_pointer(regs);
-
- fixup = search_exception_tables(pc);
- if (fixup)
- regs->ctx.CurrPC = fixup->fixup;
-
- return fixup != NULL;
-}
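fixup_exception() above rewrites the saved PC from a table of (faulting instruction, recovery address) pairs. A simplified model of that lookup; real kernels keep the table sorted, often as relative offsets, and binary-search it, but the principle is the same:

#include <stddef.h>
#include <stdio.h>

/* Simplified model: each entry pairs an instruction address that
 * may fault with the address to resume at if it does. */
struct extable_entry {
    unsigned long insn;    /* faulting instruction address */
    unsigned long fixup;   /* recovery address             */
};

static const struct extable_entry table[] = {
    { 0x1000, 0x2000 },
    { 0x1008, 0x2010 },
};

static const struct extable_entry *search(unsigned long pc)
{
    size_t i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].insn == pc)
            return &table[i];
    return NULL;   /* no fixup: a kernel-mode fault is fatal */
}

int main(void)
{
    const struct extable_entry *e = search(0x1008);

    if (e)
        printf("resume at %#lx\n", e->fixup);   /* 0x2010 */
    return 0;
}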
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
deleted file mode 100644
index de54fe6..0000000
--- a/arch/metag/mm/fault.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Meta page fault handling.
- *
- * Copyright (C) 2005-2012 Imagination Technologies Ltd.
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/sched/debug.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
-
-#include <asm/tlbflush.h>
-#include <asm/mmu.h>
-#include <asm/traps.h>
-
-/* Clear any pending catch buffer state. */
-static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
- unsigned int trapno)
-{
- PTBICTXEXTCB0 cbuf = regs->extcb0;
-
- switch (trapno) {
- /* Instruction fetch faults leave no catch buffer state. */
- case TBIXXF_SIGNUM_IGF:
- case TBIXXF_SIGNUM_IPF:
- return;
- default:
- if (cbuf[0].CBAddr == addr) {
- cbuf[0].CBAddr = 0;
- cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;
-
- /* And, as this is the ONLY catch entry, we
- * need to clear the cbuf bit from the context!
- */
- regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
- TBICTX_XCBF_BIT);
-
- return;
- }
- pr_err("Failed to clear cbuf entry!\n");
- }
-}
-
-int show_unhandled_signals = 1;
-
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned int write_access, unsigned int trapno)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma, *prev_vma;
- siginfo_t info;
- int fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-
- tsk = current;
-
- if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = ((pgd_t *)mmu_get_base()) + offset;
- pgd_k = swapper_pg_dir + offset;
-
- /* This will never happen with the folded page table. */
- if (!pgd_present(*pgd)) {
- if (!pgd_present(*pgd_k))
- goto bad_area_nosemaphore;
- set_pgd(pgd, *pgd_k);
- return 0;
- }
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- goto bad_area_nosemaphore;
- set_pud(pud, *pud_k);
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- goto bad_area_nosemaphore;
- set_pmd(pmd, *pmd_k);
-
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto bad_area_nosemaphore;
-
- /* May only be needed on Chorus2 */
- flush_tlb_all();
- return 0;
- }
-
- mm = tsk->mm;
-
- if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
- flags |= FAULT_FLAG_USER;
-retry:
- down_read(&mm->mmap_sem);
-
- vma = find_vma_prev(mm, address, &prev_vma);
-
- if (!vma || address < vma->vm_start)
- goto check_expansion;
-
-good_area:
- if (write_access) {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- flags |= FAULT_FLAG_WRITE;
- } else {
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- }
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(vma, address, flags);
-
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
- return 0;
-
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
-
- /*
- * No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
-
- goto retry;
- }
- }
-
- up_read(&mm->mmap_sem);
- return 0;
-
-check_expansion:
- vma = prev_vma;
- if (vma && (expand_stack(vma, address) == 0))
- goto good_area;
-
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- if (user_mode(regs)) {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (__force void __user *)address;
- info.si_trapno = trapno;
-
- if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
- printk_ratelimit()) {
- printk("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), address,
- regs->ctx.CurrPC, regs->ctx.AX[0].U0,
- write_access, trapno, trap_name(trapno));
- print_vma_addr(" in ", regs->ctx.CurrPC);
- print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
- printk("\n");
- show_regs(regs);
- }
- force_sig_info(SIGSEGV, &info, tsk);
- return 1;
- }
- goto no_context;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (__force void __user *)address;
- info.si_trapno = trapno;
- force_sig_info(SIGBUS, &info, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- goto no_context;
-
- return 1;
-
- /*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (user_mode(regs)) {
- pagefault_out_of_memory();
- return 1;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs)) {
- clear_cbuf_entry(regs, address, trapno);
- return 1;
- }
-
- die("Oops", regs, (write_access << 15) | trapno, address);
- do_exit(SIGKILL);
-}
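The vmalloc branch at the top of do_page_fault() lazily copies kernel mappings from the reference table (swapper_pg_dir) into the faulting task's table. A toy model of that sync, with plain arrays standing in for the real pgd types:

#include <stdio.h>

#define PTRS 16

/* Plain arrays standing in for the real pgd types. */
static unsigned long reference_pgd[PTRS];  /* swapper_pg_dir analogue */
static unsigned long process_pgd[PTRS];    /* current task's table    */

static int vmalloc_fault(unsigned int index)
{
    if (!reference_pgd[index])
        return -1;                         /* genuinely bad access */

    /* Copy the kernel mapping; this range won't fault again. */
    process_pgd[index] = reference_pgd[index];
    return 0;
}

int main(void)
{
    reference_pgd[3] = 0x40000000UL | 1;   /* pretend-present entry */

    printf("%d\n", vmalloc_fault(3));      /* 0: synced    */
    printf("%d\n", vmalloc_fault(4));      /* -1: unmapped */
    return 0;
}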
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
deleted file mode 100644
index 83527fc..0000000
--- a/arch/metag/mm/highmem.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static pte_t *kmap_pte;
-
-unsigned long highstart_pfn, highend_pfn;
-
-void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
- int type;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int idx, type;
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
- /*
-		 * Force other mappings to Oops if they try to access this
-		 * pte without first remapping it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
- int type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
-
- return (void *)vaddr;
-}
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-}
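kmap_atomic() above picks a per-CPU fixmap slot from the type index and the CPU number. A sketch of the slot arithmetic; KM_TYPE_NR, FIXADDR_TOP and FIX_KMAP_BEGIN here are illustrative assumptions, not the Meta values from asm/fixmap.h:

#include <stdio.h>

/* Assumed constants; the real ones come from asm/fixmap.h. */
#define PAGE_SHIFT     12
#define KM_TYPE_NR     16
#define FIXADDR_TOP    0xfffff000UL
#define FIX_KMAP_BEGIN 8

/* Fixmap slots grow downwards from FIXADDR_TOP. */
static unsigned long fix_to_virt(unsigned int idx)
{
    return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
    int cpu = 1, type = 2;   /* type from kmap_atomic_idx_push() */
    unsigned int idx = type + KM_TYPE_NR * cpu;

    printf("slot %u -> vaddr %#lx\n",
           idx, fix_to_virt(FIX_KMAP_BEGIN + idx));
    return 0;
}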
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
deleted file mode 100644
index 012ee4c..0000000
--- a/arch/metag/mm/hugetlbpage.c
+++ /dev/null
@@ -1,251 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/metag/mm/hugetlbpage.c
- *
- * METAG HugeTLB page support.
- *
- * Cloned from SuperH
- *
- * Cloned from sparc64 by Paul Mundt.
- *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/sysctl.h>
-
-#include <asm/mman.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-int prepare_hugepage_range(struct file *file, unsigned long addr,
- unsigned long len)
-{
- struct mm_struct *mm = current->mm;
- struct hstate *h = hstate_file(file);
- struct vm_area_struct *vma;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- if (TASK_SIZE - len < addr)
- return -EINVAL;
-
- vma = find_vma(mm, ALIGN_HUGEPT(addr));
- if (vma && !(vma->vm_flags & MAP_HUGETLB))
- return -EINVAL;
-
- vma = find_vma(mm, addr);
- if (vma) {
- if (addr + len > vma->vm_start)
- return -EINVAL;
- if (!(vma->vm_flags & MAP_HUGETLB) &&
- (ALIGN_HUGEPT(addr + len) > vma->vm_start))
- return -EINVAL;
- }
- return 0;
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- pte = pte_alloc_map(mm, pmd, addr);
- pgd->pgd &= ~_PAGE_SZ_MASK;
- pgd->pgd |= _PAGE_SZHUGE;
-
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- pte = pte_offset_kernel(pmd, addr);
-
- return pte;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return pmd_page_shift(pmd) > PAGE_SHIFT;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- return NULL;
-}
-
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
-/*
- * Look for an unmapped area starting after another hugetlb vma.
- * There are guaranteed to be no huge ptes spare if all the huge pages are
- * full size (4MB), so in that case compile out this search.
- */
-#if HPAGE_SHIFT == HUGEPT_SHIFT
-static inline unsigned long
-hugetlb_get_unmapped_area_existing(unsigned long len)
-{
- return 0;
-}
-#else
-static unsigned long
-hugetlb_get_unmapped_area_existing(unsigned long len)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr, addr;
- int after_huge;
-
- if (mm->context.part_huge) {
- start_addr = mm->context.part_huge;
- after_huge = 1;
- } else {
- start_addr = TASK_UNMAPPED_BASE;
- after_huge = 0;
- }
-new_search:
- addr = start_addr;
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
- /*
- * Start a new search - just in case we missed
- * some holes.
- */
- if (start_addr != TASK_UNMAPPED_BASE) {
- start_addr = TASK_UNMAPPED_BASE;
- goto new_search;
- }
- return 0;
- }
- /* skip ahead if we've aligned right over some vmas */
- if (vma && vma->vm_end <= addr)
- continue;
- /* space before the next vma? */
- if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
- <= vma->vm_start)) {
- unsigned long end = addr + len;
- if (end & HUGEPT_MASK)
- mm->context.part_huge = end;
- else if (addr == mm->context.part_huge)
- mm->context.part_huge = 0;
- return addr;
- }
- if (vma->vm_flags & MAP_HUGETLB) {
- /* space after a huge vma in 2nd level page table? */
- if (vma->vm_end & HUGEPT_MASK) {
- after_huge = 1;
- /* no need to align to the next PT block */
- addr = vma->vm_end;
- continue;
- }
- }
- after_huge = 0;
- addr = ALIGN_HUGEPT(vma->vm_end);
- }
-}
-#endif
-
-/* Do a full search to find an area without any nearby normal pages. */
-static unsigned long
-hugetlb_get_unmapped_area_new_pmd(unsigned long len)
-{
- struct vm_unmapped_area_info info;
-
- info.flags = 0;
- info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & HUGEPT_MASK;
- info.align_offset = 0;
- return vm_unmapped_area(&info);
-}
-
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- if (!prepare_hugepage_range(file, addr, len))
- return addr;
- }
-
-	/*
-	 * Look for an existing hugetlb vma with space after it (this is to
-	 * minimise fragmentation caused by huge pages).
-	 */
- addr = hugetlb_get_unmapped_area_existing(len);
- if (addr)
- return addr;
-
- /*
- * Find an unmapped naturally aligned set of 4MB blocks that we can use
- * for huge pages.
- */
- return hugetlb_get_unmapped_area_new_pmd(len);
-}
-
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
-
-/* necessary for boot time 4MB huge page allocation */
-static __init int setup_hugepagesz(char *opt)
-{
- unsigned long ps = memparse(opt, &opt);
- if (ps == (1 << HPAGE_SHIFT)) {
- hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
- } else {
- hugetlb_bad_size();
- pr_err("hugepagesz: Unsupported page size %lu M\n",
- ps >> 20);
- return 0;
- }
- return 1;
-}
-__setup("hugepagesz=", setup_hugepagesz);
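The validity tests in prepare_hugepage_range() above reduce to mask arithmetic on the huge-page size. A self-contained sketch, assuming the 4MB full-size huge page mentioned earlier:

#include <stdio.h>

#define HPAGE_SHIFT 22                   /* 4MB huge page (assumed) */
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

static int range_ok(unsigned long addr, unsigned long len)
{
    if (len & ~HPAGE_MASK)    /* length not a multiple of 4MB */
        return 0;
    if (addr & ~HPAGE_MASK)   /* start not 4MB aligned        */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", range_ok(0x00800000UL, 0x00400000UL)); /* 1 */
    printf("%d\n", range_ok(0x00810000UL, 0x00400000UL)); /* 0 */
    return 0;
}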
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
deleted file mode 100644
index 0e2ca90..0000000
--- a/arch/metag/mm/init.c
+++ /dev/null
@@ -1,408 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
- *
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/pagemap.h>
-#include <linux/percpu.h>
-#include <linux/memblock.h>
-#include <linux/initrd.h>
-#include <linux/sched/task.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/sections.h>
-#include <asm/tlb.h>
-#include <asm/user_gateway.h>
-#include <asm/mmzone.h>
-#include <asm/fixmap.h>
-
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
-
-unsigned long empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
-
-extern char __user_gateway_start;
-extern char __user_gateway_end;
-
-void *gateway_page;
-
-/*
- * Insert the gateway page into a set of page tables, creating the
- * page tables if necessary.
- */
-static void insert_gateway_page(pgd_t *pgd, unsigned long address)
-{
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- BUG_ON(!pgd_present(*pgd));
-
- pud = pud_offset(pgd, address);
- BUG_ON(!pud_present(*pud));
-
- pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd)) {
- pte = alloc_bootmem_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
- }
-
- pte = pte_offset_kernel(pmd, address);
- set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
-}
-
-/* Alloc and map a page in a known location accessible to userspace. */
-static void __init user_gateway_init(void)
-{
- unsigned long address = USER_GATEWAY_PAGE;
- int offset = pgd_index(address);
- pgd_t *pgd;
-
- gateway_page = alloc_bootmem_pages(PAGE_SIZE);
-
- pgd = swapper_pg_dir + offset;
- insert_gateway_page(pgd, address);
-
-#ifdef CONFIG_METAG_META12
- /*
- * Insert the gateway page into our current page tables even
- * though we've already inserted it into our reference page
- * table (swapper_pg_dir). This is because with a META1 mmu we
- * copy just the user address range and not the gateway page
- * entry on context switch, see switch_mmu().
- */
- pgd = (pgd_t *)mmu_get_base() + offset;
- insert_gateway_page(pgd, address);
-#endif /* CONFIG_METAG_META12 */
-
- BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);
-
- gateway_page += (address & ~PAGE_MASK);
-
- memcpy(gateway_page, &__user_gateway_start,
- &__user_gateway_end - &__user_gateway_start);
-
- /*
- * We don't need to flush the TLB here, there should be no mapping
- * present at boot for this address and only valid mappings are in
- * the TLB (apart from on Meta 1.x, but those cached invalid
- * mappings should be impossible to hit here).
- *
- * We don't flush the code cache here even though we have written
- * code through the data cache and they may not be coherent. At
- * this point we assume there is no stale data in the code cache
- * for this address so there is no need to flush.
- */
-}
-
-static void __init allocate_pgdat(unsigned int nid)
-{
- unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- unsigned long phys;
-#endif
-
- get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- phys = __memblock_alloc_base(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
- /* Retry with all of system memory */
- if (!phys)
- phys = __memblock_alloc_base(sizeof(struct pglist_data),
- SMP_CACHE_BYTES,
- memblock_end_of_DRAM());
- if (!phys)
- panic("Can't allocate pgdat for node %d\n", nid);
-
- NODE_DATA(nid) = __va(phys);
- memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-
- NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
-#endif
-
- NODE_DATA(nid)->node_start_pfn = start_pfn;
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
-}
-
-static void __init bootmem_init_one_node(unsigned int nid)
-{
- unsigned long total_pages, paddr;
- unsigned long end_pfn;
- struct pglist_data *p;
-
- p = NODE_DATA(nid);
-
- /* Nothing to do.. */
- if (!p->node_spanned_pages)
- return;
-
- end_pfn = pgdat_end_pfn(p);
-#ifdef CONFIG_HIGHMEM
- if (end_pfn > max_low_pfn)
- end_pfn = max_low_pfn;
-#endif
-
- total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);
-
- paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
- if (!paddr)
- panic("Can't allocate bootmap for nid[%d]\n", nid);
-
- init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
-
- free_bootmem_with_active_regions(nid, end_pfn);
-
- /*
- * XXX Handle initial reservations for the system memory node
- * only for the moment, we'll refactor this later for handling
- * reservations in other nodes.
- */
- if (nid == 0) {
- struct memblock_region *reg;
-
- /* Reserve the sections we're already using. */
- for_each_memblock(reserved, reg) {
- unsigned long size = reg->size;
-
-#ifdef CONFIG_HIGHMEM
- /* ...but not highmem */
- if (PFN_DOWN(reg->base) >= highstart_pfn)
- continue;
-
- if (PFN_UP(reg->base + size) > highstart_pfn)
- size = (highstart_pfn - PFN_DOWN(reg->base))
- << PAGE_SHIFT;
-#endif
-
- reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
- }
- }
-
- sparse_memory_present_with_active_regions(nid);
-}
-
-static void __init do_init_bootmem(void)
-{
- struct memblock_region *reg;
- int i;
-
- /* Add active regions with valid PFNs. */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- memblock_set_node(PFN_PHYS(start_pfn),
- PFN_PHYS(end_pfn - start_pfn),
- &memblock.memory, 0);
- }
-
- /* All of system RAM sits in node 0 for the non-NUMA case */
- allocate_pgdat(0);
- node_set_online(0);
-
- soc_mem_setup();
-
- for_each_online_node(i)
- bootmem_init_one_node(i);
-
- sparse_init();
-}
-
-extern char _heap_start[];
-
-static void __init init_and_reserve_mem(void)
-{
- unsigned long start_pfn, heap_start;
- u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
- heap_start = (unsigned long) &_heap_start;
-
- memblock_add(base, size);
-
- /*
- * Partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(heap_start));
-
- /*
- * Reserve the kernel text.
- */
- memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);
-
-#ifdef CONFIG_HIGHMEM
- /*
- * Add & reserve highmem, so page structures are initialised.
- */
- base = highstart_pfn << PAGE_SHIFT;
- size = (highend_pfn << PAGE_SHIFT) - base;
- if (size) {
- memblock_add(base, size);
- memblock_reserve(base, size);
- }
-#endif
-}
-
-#ifdef CONFIG_HIGHMEM
-/*
- * Ensure we have allocated page tables in swapper_pg_dir for the
- * fixed mappings range from 'start' to 'end'.
- */
-static void __init allocate_pgtables(unsigned long start, unsigned long end)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int i, j;
- unsigned long vaddr;
-
- vaddr = start;
- i = pgd_index(vaddr);
- j = pmd_index(vaddr);
- pgd = swapper_pg_dir + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
- pmd = (pmd_t *)pgd;
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- vaddr += PMD_SIZE;
-
- if (!pmd_none(*pmd))
- continue;
-
- pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, pte);
- }
- j = 0;
- }
-}
-
-static void __init fixedrange_init(void)
-{
- unsigned long vaddr, end;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- /*
- * Fixed mappings:
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
- allocate_pgtables(vaddr, end);
-
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
-}
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/metag/kernel/setup.c.
- */
-void __init paging_init(unsigned long mem_end)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
- int nid;
-
- init_and_reserve_mem();
-
- memblock_allow_resize();
-
- memblock_dump_all();
-
- nodes_clear(node_online_map);
-
- init_new_context(&init_task, &init_mm);
-
- memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
-
- do_init_bootmem();
- mmu_init(mem_end);
-
-#ifdef CONFIG_HIGHMEM
- fixedrange_init();
- kmap_init();
-#endif
-
- /* Initialize the zero page to a bootmem page, already zeroed. */
- empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-
- user_gateway_init();
-
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
- for_each_online_node(nid) {
- pg_data_t *pgdat = NODE_DATA(nid);
- unsigned long low, start_pfn;
-
- start_pfn = pgdat->bdata->node_min_pfn;
- low = pgdat->bdata->node_low_pfn;
-
- if (max_zone_pfns[ZONE_NORMAL] < low)
- max_zone_pfns[ZONE_NORMAL] = low;
-
-#ifdef CONFIG_HIGHMEM
- max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-#endif
- pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
- nid, start_pfn, low);
- }
-
- free_area_init_nodes(max_zone_pfns);
-}
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- /*
- * Explicitly reset zone->managed_pages because highmem pages are
- * freed before calling free_all_bootmem();
- */
- reset_all_zones_managed_pages();
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-#endif /* CONFIG_HIGHMEM */
-
- free_all_bootmem();
- mem_init_print_info(NULL);
-}
-
-void free_initmem(void)
-{
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
-}
-#endif
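init_and_reserve_mem() above rounds the heap start up to a whole page frame before reserving the kernel text, because partially used pages are not usable. A sketch of the PFN_UP/PFN_DOWN rounding it relies on (heap_start is an arbitrary example address):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
    unsigned long heap_start = 0x40123456UL;  /* arbitrary example */

    /* First wholly free frame after the kernel image... */
    printf("first free pfn: %#lx\n", PFN_UP(heap_start));
    /* ...versus the frame that still holds kernel data. */
    printf("frame of heap_start: %#lx\n", PFN_DOWN(heap_start));
    return 0;
}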
diff --git a/arch/metag/mm/ioremap.c b/arch/metag/mm/ioremap.c
deleted file mode 100644
index df2b59c..0000000
--- a/arch/metag/mm/ioremap.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * Needed for memory-mapped I/O devices mapped outside our normal DRAM
- * window (that is, all memory-mapped I/O devices).
- *
- * Copyright (C) 1995,1996 Linus Torvalds
- *
- * Meta port based on CRIS-port by Axis Communications AB
- */
-
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *__ioremap(unsigned long phys_addr, size_t size,
- unsigned long flags)
-{
- unsigned long addr;
- struct vm_struct *area;
- unsigned long offset, last_addr;
- pgprot_t prot;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /* Custom region addresses are accessible and uncached by default. */
- if (phys_addr >= LINSYSCUSTOM_BASE &&
- phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
- return (__force void __iomem *) phys_addr;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr+1) - phys_addr;
- prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
- _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
- flags);
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- area->phys_addr = phys_addr;
- addr = (unsigned long) area->addr;
- if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
- vunmap((void *) addr);
- return NULL;
- }
- return (__force void __iomem *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iounmap(void __iomem *addr)
-{
- struct vm_struct *p;
-
- if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
- (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
- LINSYSCUSTOM_LIMIT))
- return;
-
- p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
- if (unlikely(!p)) {
- pr_err("iounmap: bad address %p\n", addr);
- return;
- }
-
- kfree(p);
-}
-EXPORT_SYMBOL(__iounmap);
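__ioremap() above accepts non-page-aligned physical addresses by mapping page-aligned bounds and adding the sub-page offset back into the returned pointer. The arithmetic in isolation, with an example register address:

#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long phys = 0x02000a10UL, size = 0x30;  /* example I/O reg */
    unsigned long last = phys + size - 1;
    unsigned long offset = phys & ~PAGE_MASK;        /* 0xa10 */

    phys &= PAGE_MASK;                               /* 0x02000000 */
    size  = PAGE_ALIGN(last + 1) - phys;             /* one page   */

    printf("map %#lx (+%#lx), return mapping base + %#lx\n",
           phys, size, offset);
    return 0;
}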
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
deleted file mode 100644
index addffc5..0000000
--- a/arch/metag/mm/l2cache.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-
-#include <asm/l2cache.h>
-#include <asm/metag_isa.h>
-
-/* If non-0, then initialise the L2 cache */
-static int l2cache_init = 1;
-/* If non-0, then initialise the L2 cache prefetch */
-static int l2cache_init_pf = 1;
-
-int l2c_pfenable;
-
-static volatile u32 l2c_testdata[16] __initdata __aligned(64);
-
-static int __init parse_l2cache(char *p)
-{
- char *cp = p;
-
- if (get_option(&cp, &l2cache_init) != 1) {
- pr_err("Bad l2cache parameter (%s)\n", p);
- return 1;
- }
- return 0;
-}
-early_param("l2cache", parse_l2cache);
-
-static int __init parse_l2cache_pf(char *p)
-{
- char *cp = p;
-
- if (get_option(&cp, &l2cache_init_pf) != 1) {
- pr_err("Bad l2cache_pf parameter (%s)\n", p);
- return 1;
- }
- return 0;
-}
-early_param("l2cache_pf", parse_l2cache_pf);
-
-static int __init meta_l2c_setup(void)
-{
- /*
- * If the L2 cache isn't even present, don't do anything, but say so in
- * the log.
- */
- if (!meta_l2c_is_present()) {
- pr_info("L2 Cache: Not present\n");
- return 0;
- }
-
- /*
- * Check whether the line size is recognised.
- */
- if (!meta_l2c_linesize()) {
- pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
- meta_l2c_config());
- }
-
- /*
- * Initialise state.
- */
- l2c_pfenable = _meta_l2c_pf_is_enabled();
-
- /*
- * Enable the L2 cache and print to log whether it was already enabled
- * by the bootloader.
- */
- if (l2cache_init) {
- pr_info("L2 Cache: Enabling... ");
- if (meta_l2c_enable())
- pr_cont("already enabled\n");
- else
- pr_cont("done\n");
- } else {
- pr_info("L2 Cache: Not enabling\n");
- }
-
- /*
- * Enable L2 cache prefetch.
- */
- if (l2cache_init_pf) {
- pr_info("L2 Cache: Enabling prefetch... ");
- if (meta_l2c_pf_enable(1))
- pr_cont("already enabled\n");
- else
- pr_cont("done\n");
- } else {
- pr_info("L2 Cache: Not enabling prefetch\n");
- }
-
- return 0;
-}
-core_initcall(meta_l2c_setup);
-
-int meta_l2c_disable(void)
-{
- unsigned long flags;
- int en;
-
- if (!meta_l2c_is_present())
- return 1;
-
- /*
- * Prevent other threads writing during the writeback, otherwise the
- * writes will get "lost" when the L2 is disabled.
- */
- __global_lock2(flags);
- en = meta_l2c_is_enabled();
- if (likely(en)) {
- _meta_l2c_pf_enable(0);
- wr_fence();
- _meta_l2c_purge();
- _meta_l2c_enable(0);
- }
- __global_unlock2(flags);
-
- return !en;
-}
-
-int meta_l2c_enable(void)
-{
- unsigned long flags;
- int en;
-
- if (!meta_l2c_is_present())
- return 0;
-
- /*
- * Init (clearing the L2) can happen while the L2 is disabled, so other
- * threads are safe to continue executing, however we must not init the
- * cache if it's already enabled (dirty lines would be discarded), so
- * this operation should still be atomic with other threads.
- */
- __global_lock1(flags);
- en = meta_l2c_is_enabled();
- if (likely(!en)) {
- _meta_l2c_init();
- _meta_l2c_enable(1);
- _meta_l2c_pf_enable(l2c_pfenable);
- }
- __global_unlock1(flags);
-
- return en;
-}
-
-int meta_l2c_pf_enable(int pfenable)
-{
- unsigned long flags;
- int en = l2c_pfenable;
-
- if (!meta_l2c_is_present())
- return 0;
-
- /*
- * We read modify write the enable register, so this operation must be
- * atomic with other threads.
- */
- __global_lock1(flags);
- en = l2c_pfenable;
- l2c_pfenable = pfenable;
- if (meta_l2c_is_enabled())
- _meta_l2c_pf_enable(pfenable);
- __global_unlock1(flags);
-
- return en;
-}
-
-int meta_l2c_flush(void)
-{
- unsigned long flags;
- int en;
-
- /*
- * Prevent other threads writing during the writeback. This also
- * involves read modify writes.
- */
- __global_lock2(flags);
- en = meta_l2c_is_enabled();
- if (likely(en)) {
- _meta_l2c_pf_enable(0);
- wr_fence();
- _meta_l2c_purge();
- _meta_l2c_enable(0);
- _meta_l2c_init();
- _meta_l2c_enable(1);
- _meta_l2c_pf_enable(l2c_pfenable);
- }
- __global_unlock2(flags);
-
- return !en;
-}
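meta_l2c_pf_enable() above is a lock-protected read-modify-write that returns the previous setting. A portable model of the pattern, with a pthread mutex standing in for the Meta global lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pf_enabled;                 /* l2c_pfenable analogue */

static int set_prefetch(int enable)
{
    int old;

    pthread_mutex_lock(&lock);         /* __global_lock1() stand-in */
    old = pf_enabled;                  /* read                      */
    pf_enabled = enable;               /* modify + write            */
    pthread_mutex_unlock(&lock);

    return old;                        /* previous setting          */
}

int main(void)
{
    printf("%d\n", set_prefetch(1));   /* prints 0 */
    printf("%d\n", set_prefetch(0));   /* prints 1 */
    return 0;
}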
diff --git a/arch/metag/mm/maccess.c b/arch/metag/mm/maccess.c
deleted file mode 100644
index c227551..0000000
--- a/arch/metag/mm/maccess.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * safe read and write memory routines callable while atomic
- *
- * Copyright 2012 Imagination Technologies
- */
-
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-/*
- * The generic probe_kernel_write() uses the user copy code which can split the
- * writes if the source is unaligned, and repeats writes to make exceptions
- * precise. We override it here to avoid these things happening to memory mapped
- * IO memory where they could have undesired effects.
- * Due to the use of CACHERD instruction this only works on Meta2 onwards.
- */
-#ifdef CONFIG_METAG_META21
-long probe_kernel_write(void *dst, const void *src, size_t size)
-{
- unsigned long ldst = (unsigned long)dst;
- void __iomem *iodst = (void __iomem *)dst;
- unsigned long lsrc = (unsigned long)src;
- const u8 *psrc = (u8 *)src;
- unsigned int pte, i;
- u8 bounce[8] __aligned(8);
-
- if (!size)
- return 0;
-
-	/* Use the write combine bit to decide if the destination is MMIO. */
- pte = __builtin_meta2_cacherd(dst);
-
- /* Check the mapping is valid and writeable. */
- if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
- != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
- return -EFAULT;
-
- /* Fall back to generic version for cases we're not interested in. */
- if (pte & MMCU_ENTRY_WRC_BIT || /* write combined memory */
- (ldst & (size - 1)) || /* destination unaligned */
- size > 8 || /* more than max write size */
- (size & (size - 1))) /* non power of 2 size */
- return __probe_kernel_write(dst, src, size);
-
- /* If src is unaligned, copy to the aligned bounce buffer first. */
- if (lsrc & (size - 1)) {
- for (i = 0; i < size; ++i)
- bounce[i] = psrc[i];
- psrc = bounce;
- }
-
- switch (size) {
- case 1:
- writeb(*psrc, iodst);
- break;
- case 2:
- writew(*(const u16 *)psrc, iodst);
- break;
- case 4:
- writel(*(const u32 *)psrc, iodst);
- break;
- case 8:
- writeq(*(const u64 *)psrc, iodst);
- break;
- }
- return 0;
-}
-#endif
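The fast path in probe_kernel_write() above only handles naturally aligned, power-of-two writes of at most 8 bytes; everything else falls back to the generic helper. The eligibility test on its own:

#include <stdio.h>

static int fast_path_ok(unsigned long dst, unsigned long size)
{
    if (size == 0 || size > 8)
        return 0;
    if (size & (size - 1))      /* not a power of two           */
        return 0;
    if (dst & (size - 1))       /* destination not size-aligned */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", fast_path_ok(0x1000, 4));  /* 1             */
    printf("%d\n", fast_path_ok(0x1002, 4));  /* 0: misaligned */
    printf("%d\n", fast_path_ok(0x1000, 6));  /* 0: not pow2   */
    return 0;
}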
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
deleted file mode 100644
index 53190b1..0000000
--- a/arch/metag/mm/mmu-meta1.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
- *
- * Meta 1 MMU handling code.
- *
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-
-#include <asm/mmu.h>
-
-#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))
-
-/*
- * This contains the physical address of the top level 2k pgd table.
- */
-static unsigned long mmu_base_phys;
-
-/*
- * Given a physical address, return a mapped virtual address that can be used
- * to access that location.
- * In practice, we use the DirectMap region to make this happen.
- */
-static unsigned long map_addr(unsigned long phys)
-{
- static unsigned long dm_base = 0xFFFFFFFF;
- int offset;
-
- offset = phys - dm_base;
-
- /* Are we in the current map range ? */
- if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
- /* Calculate new DM area */
- dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);
-
- /* Actually map it in! */
- metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);
-
- /* And calculate how far into that area our reference is */
- offset = phys - dm_base;
- }
-
- return DM3_BASE + offset;
-}
-
-/*
- * Return the physical address of the base of our pgd table.
- */
-static inline unsigned long __get_mmu_base(void)
-{
- unsigned long base_phys;
- unsigned int stride;
-
- if (is_global_space(PAGE_OFFSET))
- stride = 4;
- else
- stride = hard_processor_id(); /* [0..3] */
-
- base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
- base_phys += (0x800 * stride);
-
- return base_phys;
-}
-
-/* Given a virtual address, return the virtual address of the relevant pgd */
-static unsigned long pgd_entry_addr(unsigned long virt)
-{
- unsigned long pgd_phys;
- unsigned long pgd_virt;
-
- if (!mmu_base_phys)
- mmu_base_phys = __get_mmu_base();
-
-	/*
-	 * Are we trying to map a global address? If so, index
-	 * the global pgd table instead of our local one.
-	 */
- if (is_global_space(virt)) {
- /* Scale into 2gig map */
- virt &= ~0x80000000;
- }
-
- /* Base of the pgd table plus our 4Meg entry, 4bytes each */
- pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);
-
- pgd_virt = map_addr(pgd_phys);
-
- return pgd_virt;
-}
-
-/* Given a virtual address, return the virtual address of the relevant pte */
-static unsigned long pgtable_entry_addr(unsigned long virt)
-{
- unsigned long pgtable_phys;
- unsigned long pgtable_virt, pte_virt;
-
-	/* Find the physical address of the 4MB page table */
- pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;
-
- /* Map it to a virtual address */
- pgtable_virt = map_addr(pgtable_phys);
-
- /* And index into it for our pte */
- pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;
-
- return pte_virt;
-}
-
-unsigned long mmu_read_first_level_page(unsigned long vaddr)
-{
- return metag_in32(pgd_entry_addr(vaddr));
-}
-
-unsigned long mmu_read_second_level_page(unsigned long vaddr)
-{
- return metag_in32(pgtable_entry_addr(vaddr));
-}
-
-unsigned long mmu_get_base(void)
-{
- static unsigned long __base;
-
- /* Find the base of our MMU pgd table */
- if (!__base)
- __base = pgd_entry_addr(0);
-
- return __base;
-}
-
-void __init mmu_init(unsigned long mem_end)
-{
- unsigned long entry, addr;
- pgd_t *p_swapper_pg_dir;
-
- /*
-	 * Now copy any MMU pgd entries already in the mmu page tables
-	 * over to our root init process (swapper_pg_dir) map. This map is
- * then inherited by all other processes, which means all processes
- * inherit a map of the kernel space.
- */
- addr = PAGE_OFFSET;
- entry = pgd_index(PAGE_OFFSET);
- p_swapper_pg_dir = pgd_offset_k(0) + entry;
-
- while (addr <= META_MEMORY_LIMIT) {
- unsigned long pgd_entry;
- /* copy over the current MMU value */
- pgd_entry = mmu_read_first_level_page(addr);
- pgd_val(*p_swapper_pg_dir) = pgd_entry;
-
- p_swapper_pg_dir++;
- addr += PGDIR_SIZE;
- }
-}
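
The map_addr() helper above reaches arbitrary physical memory through a
single relocatable DirectMap window: when the requested address falls
outside the currently mapped region, the window base register is slid to
the aligned region containing it, and the caller gets the window's virtual
base plus the offset within it. Below is a minimal standalone C sketch of
that windowing logic; the window size and virtual base are illustrative
assumptions rather than the real Meta constants, and the hardware register
write is simulated with a plain variable.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative values only -- the real constants come from the Meta
     * memory map headers (MMCU_DIRECTMAPn_ADDR_SCALE, DM3_BASE). */
    #define DM_WINDOW_SCALE 0x01000000u  /* assumed 16MB window granularity */
    #define DM_WINDOW_VIRT  0xe3000000u  /* assumed virtual base of window */

    static uint32_t dm_window_reg;  /* stands in for MMCU_DIRECTMAP3_ADDR */

    /* Mirrors map_addr(): only slide the window when phys falls outside. */
    static uint32_t sim_map_addr(uint32_t phys)
    {
        static uint32_t dm_base = 0xffffffffu; /* force miss on first use */
        int64_t offset = (int64_t)phys - dm_base;

        if (offset < 0 || offset >= DM_WINDOW_SCALE) {
            dm_base = phys & ~(DM_WINDOW_SCALE - 1); /* align window down */
            dm_window_reg = dm_base;                 /* "program" window */
            offset = phys - dm_base;
        }
        return DM_WINDOW_VIRT + (uint32_t)offset;
    }

    int main(void)
    {
        /* Two nearby lookups reuse the window; the third forces a remap. */
        printf("0x%08x\n", sim_map_addr(0x40001000u));
        printf("0x%08x\n", sim_map_addr(0x40002000u));
        printf("0x%08x\n", sim_map_addr(0x42000000u));
        printf("window register now 0x%08x\n", dm_window_reg);
        return 0;
    }
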
diff --git a/arch/metag/mm/mmu-meta2.c b/arch/metag/mm/mmu-meta2.c
deleted file mode 100644
index 8b668a6..0000000
--- a/arch/metag/mm/mmu-meta2.c
+++ /dev/null
@@ -1,208 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
- *
- * Meta 2 enhanced mode MMU handling code.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-#include <linux/syscore_ops.h>
-
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-
-unsigned long mmu_read_first_level_page(unsigned long vaddr)
-{
- unsigned int cpu = hard_processor_id();
- unsigned long offset, linear_base, linear_limit;
- unsigned int phys0;
- pgd_t *pgd, entry;
-
- if (is_global_space(vaddr))
- vaddr &= ~0x80000000;
-
- offset = vaddr >> PGDIR_SHIFT;
-
- phys0 = metag_in32(mmu_phys0_addr(cpu));
-
- /* Top bit of linear base is always zero. */
- linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;
-
- /* Limit in the range 0 (4MB) to 9 (2GB). */
- linear_limit = 1 << ((phys0 >> 8) & 0xf);
- linear_limit += linear_base;
-
- /*
- * If offset is below linear base or above the limit then no
- * mapping exists.
- */
- if (offset < linear_base || offset > linear_limit)
- return 0;
-
- offset -= linear_base;
- pgd = (pgd_t *)mmu_get_base();
- entry = pgd[offset];
-
- return pgd_val(entry);
-}
-
-unsigned long mmu_read_second_level_page(unsigned long vaddr)
-{
- return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
-}
-
-unsigned long mmu_get_base(void)
-{
- unsigned int cpu = hard_processor_id();
- unsigned long stride;
-
- stride = cpu * LINSYSMEMTnX_STRIDE;
-
- /*
- * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
- * used as an offset to the start of the top-level pgd table.
- */
- stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);
-
- if (is_global_space(PAGE_OFFSET))
- stride += LINSYSMEMTXG_OFFSET;
-
- return LINSYSMEMT0L_BASE + stride;
-}
-
-#define FIRST_LEVEL_MASK 0xffffffc0
-#define SECOND_LEVEL_MASK 0xfffff000
-#define SECOND_LEVEL_ALIGN 64
-
-static void repriv_mmu_tables(void)
-{
- unsigned long phys0_addr;
- unsigned int g;
-
- /*
- * Check that all the mmu table regions are priv protected, and if not
- * fix them and emit a warning. If we left them without priv protection
- * then userland processes would have access to a 2M window into
-	 * then userland processes would have access to a 2MB window into
- */
- phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
- for (g = 0; g < 2; ++g) {
- unsigned int t, phys0;
- unsigned long flags;
- for (t = 0; t < 4; ++t) {
- __global_lock2(flags);
- phys0 = metag_in32(phys0_addr);
- if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
- pr_warn("Fixing priv protection on T%d %s MMU table region\n",
- t,
- g ? "global" : "local");
- phys0 |= _PAGE_PRIV;
- metag_out32(phys0, phys0_addr);
- }
- __global_unlock2(flags);
-
- phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
- }
-
- phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
- - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
- }
-}
-
-#ifdef CONFIG_METAG_SUSPEND_MEM
-static void mmu_resume(void)
-{
- /*
-	 * If a full suspend to RAM has happened then the original unprotected
-	 * MMU table priv settings may have been restored, so repriv them again.
- */
- repriv_mmu_tables();
-}
-#else
-#define mmu_resume NULL
-#endif /* CONFIG_METAG_SUSPEND_MEM */
-
-static struct syscore_ops mmu_syscore_ops = {
- .resume = mmu_resume,
-};
-
-void __init mmu_init(unsigned long mem_end)
-{
- unsigned long entry, addr;
- pgd_t *p_swapper_pg_dir;
-#ifdef CONFIG_KERNEL_4M_PAGES
- unsigned long mem_size = mem_end - PAGE_OFFSET;
- unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
- unsigned int second_level_entry = 0;
- unsigned long *second_level_table;
-#endif
-
- /*
-	 * Now copy any MMU pgd entries already in the mmu page tables
- * over to our root init process (swapper_pg_dir) map. This map is
- * then inherited by all other processes, which means all processes
- * inherit a map of the kernel space.
- */
- addr = META_MEMORY_BASE;
- entry = pgd_index(META_MEMORY_BASE);
- p_swapper_pg_dir = pgd_offset_k(0) + entry;
-
- while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
- unsigned long pgd_entry;
- /* copy over the current MMU value */
- pgd_entry = mmu_read_first_level_page(addr);
- pgd_val(*p_swapper_pg_dir) = pgd_entry;
-
- p_swapper_pg_dir++;
- addr += PGDIR_SIZE;
- entry++;
- }
-
-#ifdef CONFIG_KERNEL_4M_PAGES
- /*
- * At this point we can also map the kernel with 4MB pages to
- * reduce TLB pressure.
- */
- second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);
-
- addr = PAGE_OFFSET;
- entry = pgd_index(PAGE_OFFSET);
- p_swapper_pg_dir = pgd_offset_k(0) + entry;
-
- while (pages > 0) {
- unsigned long phys_addr, second_level_phys;
- pte_t *pte = (pte_t *)&second_level_table[second_level_entry];
-
- phys_addr = __pa(addr);
-
- second_level_phys = __pa(pte);
-
- pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
- FIRST_LEVEL_MASK) |
- _PAGE_SZ_4M |
- _PAGE_PRESENT);
-
- pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
- _PAGE_PRESENT | _PAGE_DIRTY |
- _PAGE_ACCESSED | _PAGE_WRITE |
- _PAGE_CACHEABLE | _PAGE_KERNEL);
-
- p_swapper_pg_dir++;
- addr += PGDIR_SIZE;
-		/* Second level page tables must be 64-byte aligned. */
- second_level_entry += (SECOND_LEVEL_ALIGN /
- sizeof(unsigned long));
- pages--;
- }
- load_pgd(swapper_pg_dir, hard_processor_id());
- flush_tlb_all();
-#endif
-
- repriv_mmu_tables();
- register_syscore_ops(&mmu_syscore_ops);
-}
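
mmu_read_first_level_page() above treats the PHYS0 register as describing
a linear window of pgd slots: the bits above PGDIR_SHIFT give the window
base, and the 4-bit field at bit 8 gives log2 of its length in 4MB units.
The standalone sketch below replays that decode; the field layout is read
straight from the deleted code, the example register value is an assumed
illustration rather than a dump from real hardware, and the global-space
top-bit handling is omitted for brevity.

    #include <stdio.h>
    #include <stdint.h>

    #define PGDIR_SHIFT 22  /* 4MB first-level entries, as on Meta */

    /* Does vaddr fall inside the linear window described by phys0? */
    static int vaddr_has_mapping(uint32_t phys0, uint32_t vaddr)
    {
        uint32_t offset = vaddr >> PGDIR_SHIFT;
        /* Top bit of the linear base is always zero: 9-bit mask. */
        uint32_t linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;
        /* Limit field 0 (4MB) to 9 (2GB), as the original comment notes. */
        uint32_t linear_limit = (1u << ((phys0 >> 8) & 0xf)) + linear_base;

        return offset >= linear_base && offset <= linear_limit;
    }

    int main(void)
    {
        /* Assumed example: window base at pgd slot 0x100, 2^4 slots long. */
        uint32_t phys0 = (0x100u << PGDIR_SHIFT) | (4u << 8);

        printf("%d\n", vaddr_has_mapping(phys0, 0x40000000u)); /* inside: 1 */
        printf("%d\n", vaddr_has_mapping(phys0, 0x48000000u)); /* outside: 0 */
        return 0;
    }
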
diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c
deleted file mode 100644
index 67b46c2..0000000
--- a/arch/metag/mm/numa.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Multiple memory node support for Meta machines
- *
- * Copyright (C) 2007 Paul Mundt
- * Copyright (C) 2010 Imagination Technologies Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/export.h>
-#include <linux/bootmem.h>
-#include <linux/memblock.h>
-#include <linux/mm.h>
-#include <linux/numa.h>
-#include <linux/pfn.h>
-#include <asm/sections.h>
-
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL_GPL(node_data);
-
-extern char _heap_start[];
-
-/*
- * On Meta machines the conventional approach is to stash system RAM
- * in node 0, and other memory blocks into node 1 and up, ordered by
- * latency. Each node's pgdat is node-local at the beginning of the node,
- * immediately followed by the node mem map.
- */
-void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
-{
- unsigned long bootmap_pages, bootmem_paddr;
- unsigned long start_pfn, end_pfn;
- unsigned long pgdat_paddr;
-
- /* Don't allow bogus node assignment */
- BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
-
- start_pfn = start >> PAGE_SHIFT;
- end_pfn = end >> PAGE_SHIFT;
-
- memblock_add(start, end - start);
-
- memblock_set_node(PFN_PHYS(start_pfn),
- PFN_PHYS(end_pfn - start_pfn),
- &memblock.memory, nid);
-
- /* Node-local pgdat */
- pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, end);
- NODE_DATA(nid) = __va(pgdat_paddr);
- memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-
- NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
- NODE_DATA(nid)->node_start_pfn = start_pfn;
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
-
- /* Node-local bootmap */
- bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
- PAGE_SIZE, end);
- init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
- start_pfn, end_pfn);
-
- free_bootmem_with_active_regions(nid, end_pfn);
-
- /* Reserve the pgdat and bootmap space with the bootmem allocator */
- reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK,
- sizeof(struct pglist_data), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
- bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-
- /* It's up */
- node_set_online(nid);
-
- /* Kick sparsemem */
- sparse_memory_present_with_active_regions(nid);
-}
-
-void __init __weak soc_mem_setup(void)
-{
-}
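
setup_bootmem_node() above reduces each memory block to a page-frame range
before handing it to memblock and the bootmem allocator. The arithmetic is
simple enough to show in isolation; the sketch below assumes the default
4kB page size and a hypothetical 8MB on-chip SRAM block bound to node 1.

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumes the default 4kB page size */

    int main(void)
    {
        /* Hypothetical on-chip SRAM block: 8MB starting at 0xe0000000. */
        unsigned long start = 0xe0000000ul;
        unsigned long end = 0xe0800000ul;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long end_pfn = end >> PAGE_SHIFT;

        /* node_start_pfn and node_spanned_pages as the deleted code
         * would set them for this block */
        printf("node 1: start_pfn %lu, spanned pages %lu\n",
               start_pfn, end_pfn - start_pfn);
        return 0;
    }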