/*
 * sheevaplug reboot code
 *
 * R11 is used by the loader as a temporary, so avoid it.
 */
#include "arm.s"

/*
 * Turn off MMU, then copy the new kernel to its correct location
 * in physical memory.  Then jump to the start of the kernel.
 */
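
/*
 * The kernel proper is expected to copy this trampoline into low memory
 * and call it with physical addresses, roughly as in this hypothetical
 * sketch (not part of this file; names such as REBOOTADDR and rebootcode
 * are assumptions about the caller, only the argument order is certain):
 *
 *	void (*f)(ulong, ulong, ulong);
 *
 *	f = (void *)REBOOTADDR;
 *	memmove(f, rebootcode, sizeof rebootcode);
 *	cacheuwbinv();
 *	(*f)(PADDR(entry), PADDR(code), size);
 */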

/* main(PADDR(entry), PADDR(code), size); */
TEXT	main(SB), 1, $-4
	MOVW	$setR12(SB), R12

	MOVW	R0, p1+0(FP)		/* destination, passed in R0 */

	/* copy in arguments from frame */
	MOVW	R0, R8			/* entry point */
	MOVW	p2+4(FP), R9		/* source */
	MOVW	n+8(FP), R10		/* byte count */

PUTC('R')
	BL	cachesoff(SB)
	/* now back in 29- or 26-bit addressing, mainly for SB */

	/* turn the MMU off */
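	/*
	 * before translation is disabled, the PC (via _r15warp), SB and SP
	 * are moved from their KSEGM virtual aliases down to the matching
	 * PHYSDRAM addresses, which cachesoff has just double-mapped.
	 */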
PUTC('e')
	MOVW	$KSEGM, R7
	MOVW	$PHYSDRAM, R0
	BL	_r15warp(SB)

	BIC	R7, R12			/* SB */
	BIC	R7, R13			/* SP */
	/* don't care about R14 */

PUTC('b')
	BL	mmuinvalidate(SB)
PUTC('o')
	BL	mmudisable(SB)

PUTC('o')
	MOVW	R9, R4			/* restore regs across function calls */
	MOVW	R10, R5
	MOVW	R8, R6

	/* set up a new stack for local vars and memmove args */
	MOVW	R6, SP			/* tiny trampoline stack */
	SUB	$(0x20 + 4), SP		/* back up before a.out header */

	MOVW	R14, -48(SP)		/* store return addr */
	SUB	$48, SP			/* allocate stack frame */

	MOVW	R6, 44(SP)		/* save dest/entry */
	MOVW	R5, 40(SP)		/* save count */

PUTC('t')
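
	/*
	 * call memmove(dest, src, size) to copy the new kernel into place.
	 * dest is left in R0 as well as in the first stack slots, presumably
	 * so the call works whether the first argument is taken from R0 or
	 * from the frame.
	 */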
	MOVW	R6, 0(SP)
	MOVW	R6, 4(SP)		/* push dest */
	MOVW	R6, R0
	MOVW	R4, 8(SP)		/* push src */
	MOVW	R5, 12(SP)		/* push size */
	BL	memmove(SB)

	MOVW	44(SP), R6		/* restore R6 (dest/entry) */
	MOVW	40(SP), R5		/* restore R5 (count) */
PUTC('-')
	/*
	 * flush caches
	 */
	BL	cacheuwbinv(SB)

PUTC('>')
PUTC('\r');
PUTC('\n');
/*
 * jump to kernel entry point.  Note the true kernel entry point is
 * the virtual address KZERO|R6, but this must wait until
 * the MMU is enabled by the kernel in l.s
 */
	ORR	R6, R6			/* NOP: avoid link bug */
	B	(R6)

/*
 * turn the caches off, double map 0 & KZERO, invalidate TLBs, revert to
 * tiny addresses.  upon return, it will be safe to turn off the mmu.
 */
TEXT cachesoff(SB), 1, $-4
	MOVW	$(PsrDirq|PsrDfiq|PsrMsvc), R0
	MOVW	R0, CPSR
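	/* IRQ and FIQ are now masked and the CPU is in SVC mode */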
	MOVW	$KADDR(0x100-4), R7		/* just before this code */
	MOVW	R14, (R7)			/* save link */

	BL	cacheuwbinv(SB)

	MRC	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpCwb|CpCicache|CpCdcache|CpCalign), R0
	MCR	CpSC, 0, R0, C(CpCONTROL), C(0)
	BARRIERS

	/* redo double map of 0, KZERO */
	MOVW	$(L1+L1X(PHYSDRAM)), R4		/* address of PTE for 0 */
	MOVW	$PTEDRAM, R2			/* PTE bits */
//	MOVW	$PTEIO, R2			/* PTE bits */
	MOVW	$PHYSDRAM, R3
	MOVW	$512, R5
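	/* write 512 level-1 section entries of 1MiB each, from PHYSDRAM up */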
_ptrdbl:
	ORR	R3, R2, R1		/* first identity-map 0 to 0, etc. */
	MOVW	R1, (R4)
	ADD	$4, R4				/* bump PTE address */
	ADD	$MiB, R3			/* bump pa */
	SUB.S	$1, R5
	BNE	_ptrdbl

	BARRIERS
	MOVW	$0, R0
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* back to 29- or 26-bit addressing, mainly for SB */
	MRC	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpCd32|CpCi32), R0
	MCR	CpSC, 0, R0, C(CpCONTROL), C(0)
	BARRIERS

	MOVW	$KADDR(0x100-4), R7		/* just before this code */
	MOVW	(R7), R14			/* restore link */
	RET

TEXT _r15warp(SB), 1, $-4
	BIC	$KSEGM, R14
	ORR	R0, R14
	RET

TEXT mmudisable(SB), 1, $-4
	MRC	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb), R0
	MCR	CpSC, 0, R0, C(CpCONTROL), C(0)
	BARRIERS
	RET

TEXT mmuinvalidate(SB), 1, $-4			/* invalidate all */
	MOVW	$0, R0
	MCR	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS
	RET

TEXT cacheuwbinv(SB), 1, $-4			/* D+I writeback+invalidate */
	BARRIERS
	MOVW	CPSR, R3			/* splhi */
	ORR	$(PsrDirq), R3, R1
	MOVW	R1, CPSR

_uwbinv:					/* D writeback+invalidate */
	MRC	CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest
	BNE	_uwbinv
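	/* the test op above sets the condition codes; NE means dirty lines remain */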

	MOVW	$0, R0				/* I invalidate */
	MCR	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	/* drain L1 write buffer, also drains L2 eviction buffer on sheeva */
	BARRIERS
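
	/* sheeva-specific: write back, then invalidate, the unified L2 via the Marvell test/config registers */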
	MCR	CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all
	BARRIERS
	MCR	CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all
	BARRIERS

	MOVW	R3, CPSR			/* splx */
	RET