path: root/arch/x86/um/os-Linux/task_size.c
blob: 8502ad30e61bcfc49a9e28c975d356b4b628a62c
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <longjmp.h>

#ifdef __i386__

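/*
 * Recovery point for the SIGSEGV handler below: each probe arms it with
 * setjmp() before touching a candidate address, and segfault() longjmp()s
 * back to that point if the access faults.
 */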
static jmp_buf buf;

static void segfault(int sig)
{
	longjmp(buf, 1);
}

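/*
 * Return nonzero if "page" (a page number, not an address) can be read
 * and written, possibly after mmap()ing or mprotect()ing it, i.e. if it
 * lies within the host's usable user address space.
 */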
static int page_ok(unsigned long page)
{
	unsigned long *address = (unsigned long *) (page << UM_KERN_PAGE_SHIFT);
	unsigned long n = ~0UL;
	void *mapped = NULL;
	int ok = 0;

	/*
	 * First see if the page is readable.  If it is, it may still
	 * be a VDSO, so we go on to see if it's writable.  If not
	 * then try mapping memory there.  If that fails, then we're
	 * still in the kernel area.  As a sanity check, we'll fail if
	 * the mmap succeeds, but gives us an address different from
	 * what we wanted.
	 */
	if (setjmp(buf) == 0)
		n = *address;
	else {
		mapped = mmap(address, UM_KERN_PAGE_SIZE,
			      PROT_READ | PROT_WRITE,
			      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (mapped == MAP_FAILED)
			return 0;
		if (mapped != address)
			goto out;
	}

	/*
	 * Now, is it writeable?  If so, then we're in user address
	 * space.  If not, then try mprotecting it and try the write
	 * again.
	 */
	if (setjmp(buf) == 0) {
		*address = n;
		ok = 1;
		goto out;
	} else if (mprotect(address, UM_KERN_PAGE_SIZE,
			    PROT_READ | PROT_WRITE) != 0)
		goto out;

	if (setjmp(buf) == 0) {
		*address = n;
		ok = 1;
	}

 out:
	if (mapped != NULL)
		munmap(mapped, UM_KERN_PAGE_SIZE);
	return ok;
}

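/*
 * Probe the host's address space to find the highest address usable by
 * UML processes: scan upward for the lowest usable page, then binary
 * search for the point at which pages stop being usable.
 */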
unsigned long os_get_top_address(void)
{
	struct sigaction sa, old;
	unsigned long bottom = 0;
	/*
	 * A 32-bit UML on a 64-bit host gets confused about the VDSO at
	 * 0xffffe000.  It is mapped, is readable, can be reprotected writeable
	 * and written.  However, exec discovers later that it can't be
	 * unmapped.  So, just set the highest address to be checked to just
	 * below it.  This might waste some address space on 4G/4G 32-bit
	 * hosts, but shouldn't hurt otherwise.
	 */
	unsigned long top = 0xffffd000 >> UM_KERN_PAGE_SHIFT;
	unsigned long test, original;

	printf("Locating the bottom of the address space ... ");
	fflush(stdout);

	/*
	 * We're going to be longjmping out of the signal handler, so
	 * SA_NODEFER needs to be set.
	 */
	sa.sa_handler = segfault;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_NODEFER;
	if (sigaction(SIGSEGV, &sa, &old)) {
		perror("os_get_top_address");
		exit(1);
	}

	/* Manually scan the address space, bottom-up, until we find
	 * the first valid page (or run out of them).
	 */
	for (bottom = 0; bottom < top; bottom++) {
		if (page_ok(bottom))
			break;
	}

	/* If bottom caught up with top, we ran out of pages. */
	if (bottom == top) {
		fprintf(stderr, "Unable to determine bottom of address "
			"space.\n");
		exit(1);
	}

	printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
	printf("Locating the top of the address space ... ");
	fflush(stdout);

	original = bottom;

	/* This could happen with a 4G/4G split */
	if (page_ok(top))
		goto out;

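	/*
	 * Binary search for the boundary: page_ok(bottom) stays true and
	 * page_ok(top) stays false, so when they are one page apart "top"
	 * is the first unusable page, i.e. the top of the address space.
	 */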
	do {
		test = bottom + (top - bottom) / 2;
		if (page_ok(test))
			bottom = test;
		else
			top = test;
	} while (top - bottom > 1);

out:
	/* Restore the old SIGSEGV handling */
	if (sigaction(SIGSEGV, &old, NULL)) {
		perror("os_get_top_address");
		exit(1);
	}
	top <<= UM_KERN_PAGE_SHIFT;
	printf("0x%lx\n", top);

	return top;
}

#else

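/*
 * On 64-bit hosts the probing above is not used; the top of the address
 * space is a fixed value.
 */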
unsigned long os_get_top_address(void)
{
	/* The old value of CONFIG_TOP_ADDR */
	return 0x7fc0000000;
}

#endif