path: root/arch/tile/kernel/regs_32.S
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <arch/spr_def.h>
#include <asm/processor.h>
#include <asm/switch_to.h>

/*
 * See <asm/switch_to.h>; called with prev and next task_struct pointers.
 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
 *
 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
 * We also need to save all the callee-saved registers on the stack.
 *
 * The x86 code enables/disables access to the hardware cycle counter in
 * seccomp (secure computing) environments if necessary, based on
 * has_secure_computing().  We might want to do this at some point,
 * though it would require virtualizing the other SPRs under WORLD_ACCESS.
 *
 * Since we're saving to the stack, we omit sp from this list.
 * And, for consistency with other architectures, we save lr separately,
 * in the thread_struct itself (as the "pc" field).
 *
 * This code also needs to be kept in sync with copy_thread() in process.c.
 */

#if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/switch_to.h> and kernel/entry.S
#endif
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
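
/*
 * SAVE_REG and LOAD_REG store or load the named register through the
 * pointer in r12, then advance r12 by one 32-bit word.
 * FOR_EACH_CALLEE_SAVED_REG applies its argument to each of the
 * callee-saved registers r30 through r52.
 */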

#define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 }
#define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 }
#define FOR_EACH_CALLEE_SAVED_REG(f)					\
							f(r30); f(r31); \
	f(r32); f(r33); f(r34); f(r35);	f(r36); f(r37); f(r38); f(r39); \
	f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
	f(r48); f(r49); f(r50); f(r51); f(r52);

STD_ENTRY_SECTION(__switch_to, .sched.text)
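	/*
	 * Remember the old sp in r10, save lr at the incoming sp, and
	 * allocate our stack frame.
	 */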
	{
	  move r10, sp
	  sw sp, lr
	  addi sp, sp, -FRAME_SIZE
	}
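	/* r11 -> caller-sp slot; r12 -> the register save area. */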
	{
	  addi r11, sp, 4
	  addi r12, sp, 8
	}
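	/* Record the caller's sp in our frame; r4 = &next->thread.ksp. */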
	{
	  sw r11, r10
	  addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
	}
	{
	  lw r13, r4   /* Load new sp to a temp register early. */
	  addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
	}
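	/* Spill r30..r52 into the register save area. */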
	FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
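	/* Save our sp as prev->thread.ksp; then r3 = &prev->thread.pc. */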
	{
	  sw r3, sp
	  addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
	}
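	/* Save lr as prev->thread.pc; r4 = &next->thread.pc. */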
	{
	  sw r3, lr
	  addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
	}
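	/* lr = next->thread.pc; r12 -> next's register save area. */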
	{
	  lw lr, r4
	  addi r12, r13, 8
	}
	{
	  /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
	  move sp, r13
	  mtspr SPR_SYSTEM_SAVE_K_0, r2
	}
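	/* Reload r30..r52 from next's switch frame. */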
	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
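	/* get_switch_to_pc() reports this address for suspended tasks. */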
.L__switch_to_pc:
	{
	  addi sp, sp, FRAME_SIZE
	  jrp lr   /* r0 is still valid here, so return it */
	}
	STD_ENDPROC(__switch_to)

/* Return a suitable address for the backtracer for suspended threads */
STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
	lnk r0
	{
	  addli r0, r0, .L__switch_to_pc - .
	  jrp lr
	}
	STD_ENDPROC(get_switch_to_pc)
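
/*
 * Fill the pt_regs pointed to by r0 with the current register state:
 * r0..r52, tp, sp, and lr are stored, the pc is taken from within
 * this function, and ex1 is built from the current ICS state and
 * KERNEL_PL.  faultnum and orig_r0 are cleared.  The pt_regs pointer
 * is returned in r0.
 */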

STD_ENTRY(get_pt_regs)
	.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
		 r8, r9, r10, r11, r12, r13, r14, r15, \
		 r16, r17, r18, r19, r20, r21, r22, r23, \
		 r24, r25, r26, r27, r28, r29, r30, r31, \
		 r32, r33, r34, r35, r36, r37, r38, r39, \
		 r40, r41, r42, r43, r44, r45, r46, r47, \
		 r48, r49, r50, r51, r52, tp, sp
	{
	 sw r0, \reg
	 addi r0, r0, 4
	}
	.endr
	{
	 sw r0, lr
	 addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
	}
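	/* lnk yields the address of the next bundle; store it as the pc. */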
	lnk r1
	{
	 sw r0, r1
	 addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
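	/* Build ex1 from the current ICS state and the kernel PL. */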
	mfspr r1, INTERRUPT_CRITICAL_SECTION
	shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
	ori r1, r1, KERNEL_PL
	{
	 sw r0, r1
	 addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	}
	{
	 sw r0, zero       /* clear faultnum */
	 addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
	}
	{
	 sw r0, zero       /* clear orig_r0 */
	 addli r0, r0, -PTREGS_OFFSET_ORIG_R0    /* restore r0 to base */
	}
	jrp lr
	STD_ENDPROC(get_pt_regs)