/*
 * arch/blackfin/mm/init.c
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/gfp.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <asm/bfin-global.h>
#include <asm/pda.h>
#include <asm/cplbinit.h>
#include <asm/early_printk.h>
#include "blackfin_sram.h"

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

static unsigned long empty_zero_page;

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
#if defined CONFIG_SYSCALL_TAB_L1
__attribute__((l1_data))
#endif
static unsigned long exception_stack[NR_CPUS][1024];
#endif

struct blackfin_pda cpu_pda[NR_CPUS];
EXPORT_SYMBOL(cpu_pda);

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in the arch head.S. It takes no parameters:
 * on this no-MMU port it only sets up the special pages used by the
 * fault paths and hands the memory layout to free_area_init().
 */
void __init paging_init(void)
{
	/*
	 * make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;

	pr_debug("start_mem is %#lx   virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
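	/*
	 * note: only the zero page is given defined contents here; the
	 * bad page and bad page table are left uninitialized, since on
	 * this no-MMU port they only need to exist as reserved pages
	 */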

	/*
	 * Set up SFC/DFC registers (user data space)
	 */
	set_fs(KERNEL_DS);

	pr_debug("free_area_init -> start_mem is %#lx   virtual_end is %#lx\n",
	        PAGE_ALIGN(memory_start), end_mem);

	{
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

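		/*
		 * everything goes into ZONE_DMA on Blackfin: with no MMU
		 * all of external memory is directly addressable, so the
		 * other zones stay empty
		 */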
		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		zones_size[ZONE_NORMAL] = 0;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = 0;
#endif
		free_area_init(zones_size);
	}
}

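/*
 * init_pda() runs once per core, called from the early assembly boot
 * code (hence the asmlinkage), before the normal C environment is
 * fully available.
 */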
asmlinkage void __init init_pda(void)
{
	unsigned int cpu = raw_smp_processor_id();

	early_shadow_stamp();

	/* Initialize the PDA fields holding references to other parts
	   of memory. The contents of that memory are still undefined
	   at the time of this call; we are only setting up valid
	   pointers to it. */
	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));

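	/* chain the per-core PDAs into a two-entry ring so that, on the
	   dual-core parts, each core can reach its peer's PDA via ->next */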
	cpu_pda[0].next = &cpu_pda[1];
	cpu_pda[1].next = &cpu_pda[0];

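	/* ex_stack must hold the *top* of the exception stack, since
	   stacks grow downward: either the end of the L1 scratchpad, or
	   the end of this CPU's slot in exception_stack[] (the start of
	   the next slot, hence the cpu + 1 index below) */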
#ifdef CONFIG_EXCEPTION_L1_SCRATCH
	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START +
						  L1_SCRATCH_LENGTH);
#else
	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#endif

#ifdef CONFIG_SMP
	cpu_pda[cpu].imask = 0x1f;
#endif
}

void __init mem_init(void)
{
	unsigned int codek = 0, datak = 0, initk = 0;
	unsigned int reservedpages = 0, freepages = 0;
	unsigned long tmp;
	unsigned long start_mem = memory_start;
	unsigned long end_mem = memory_end;

	end_mem &= PAGE_MASK;
	high_memory = (void *)end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	max_mapnr = num_physpages = MAP_NR(high_memory);
	printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);

	/* This will put all memory onto the freelists. */
	totalram_pages = free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_mapnr; tmp++)
		if (PageReserved(pfn_to_page(tmp)))
			reservedpages++;
	freepages =  max_mapnr - reservedpages;

	/* do not count the kernel image between _rambase and _ramstart */
	reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
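	/*
	 * anomaly 05000263 limits how much external memory may be made
	 * icacheable, so the RAM between memory_end and the uncached DMA
	 * region goes unused; account for it as reserved
	 */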
#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif

	codek = (_etext - _stext) >> 10;
	initk = (__init_end - __init_begin) >> 10;
	datak = ((_ramstart - _rambase) >> 10) - codek - initk;

	printk(KERN_INFO
		"Memory available: %luk/%luk RAM, "
		"(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
		(unsigned long)freepages << (PAGE_SHIFT - 10), _ramend >> 10,
		initk, codek, datak, DMA_UNCACHED_REGION >> 10,
		reservedpages << (PAGE_SHIFT - 10));
}

static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	/* advance a whole page at a time so that a trailing partial
	   page is never freed */
	for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif

void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
#endif
}