#include	"mem.h"

/* use of SPRG registers in save/restore */
#define	SAVER0	SPRG0
#define	SAVER1	SPRG1
#define	SAVELR	SPRG2
#define	SAVEXX	SPRG3

#ifdef ucuconf
/* These only exist on the PPC 755: */
#define	SAVER4	SPRG4
#define	SAVER5	SPRG5
#define	SAVER6	SPRG6
#define	SAVER7	SPRG7
#endif /* ucuconf */

/* special instruction definitions */
#define	BDNZ		BC	16, 0,
#define	BDNE		BC	0, 2,
#define	MTCRF(r, crm)	WORD	$((31<<26)|((r)<<21)|(crm<<12)|(144<<1))
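
/*
 * Worked example of the encoding above, for reference: MTCRF(1, 0x80)
 * assembles to (31<<26)|(1<<21)|(0x80<<12)|(144<<1) = 0x7C280120,
 * i.e. mtcrf 0x80, R1, which copies the top four bits of R1 into
 * CR field 0 (CR0).
 */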

/* #define	TLBIA	WORD	$((31<<26)|(370<<1)) Not implemented on the 603e */
#define	TLBSYNC		WORD	$((31<<26)|(566<<1))
#define	TLBLI(n)	WORD	$((31<<26)|((n)<<11)|(1010<<1))
#define	TLBLD(n)	WORD	$((31<<26)|((n)<<11)|(978<<1))
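/*
 * In TLBLI/TLBLD, (n)<<11 is the RB field, so TLBLI(3) loads the ITLB
 * entry for the effective address held in R3 (likewise TLBLD for the DTLB).
 */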

/* on some models mtmsr doesn't synchronise enough (eg, 603e) */
#define	MSRSYNC	SYNC

#define	UREGSPACE	(UREGSIZE+8)

TEXT start(SB), $-4

	/*
	 * setup MSR
	 * turn off interrupts
	 * use 0x000 as exception prefix
	 * disable machine check (MSR_ME is cleared below)
	 */
	MOVW	MSR, R3
	MOVW	$(MSR_ME|MSR_EE|MSR_IP), R4
	ANDN	R4, R3
	SYNC
	MOVW	R3, MSR
	MSRSYNC

	/* except during trap handling, R0 is zero from now on */
	MOVW	$0, R0

	/* setup SB for pre mmu */
	MOVW	$setSB(SB), R2
	MOVW	$KZERO, R3
	ANDN	R3, R2

	/* before this we're not running above KZERO */
	BL	mmuinit0(SB)
	/* after this we are */

#ifdef ucuconf
	MOVW	$0x2000000, R4		/* size */
	MOVW	$0, R3			/* base address */
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	_dcf1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
_dcf0:	DCBF	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	_dcf0
_dcf1:
	SYNC

	/* BAT0, 3 unused, copy of BAT2 */
	MOVW	SPR(IBATL(2)), R3
	MOVW	R3, SPR(IBATL(0))
	MOVW	SPR(IBATU(2)), R3
	MOVW	R3, SPR(IBATU(0))
	MOVW	SPR(DBATL(2)), R3
	MOVW	R3, SPR(DBATL(0))
	MOVW	SPR(DBATU(2)), R3
	MOVW	R3, SPR(DBATU(0))

	MOVW	SPR(IBATL(2)), R3
	MOVW	R3, SPR(IBATL(3))
	MOVW	SPR(IBATU(2)), R3
	MOVW	R3, SPR(IBATU(3))
	MOVW	SPR(DBATL(2)), R3
	MOVW	R3, SPR(DBATL(3))
	MOVW	SPR(DBATU(2)), R3
	MOVW	R3, SPR(DBATU(3))
#endif /* ucuconf */

	/* running with MMU on!! */

	/* set R2 to correct value */
	MOVW	$setSB(SB), R2

	/* set up Mach */
	MOVW	$MACHADDR, R(MACH)
	ADD	$(MACHSIZE-8), R(MACH), R1	/* set stack */

	MOVW	R0, R(USER)		/* up-> set to zero */
	MOVW	R0, 0(R(MACH))		/* machno set to zero */

	BL	main(SB)

	RETURN				/* not reached */

/*
 * on return from this function we will be running in virtual mode.
 * We set up the Block Address Translation (BAT) registers thus:
 * 1) BATs 0 and 1 map the first 512M as two 256M blocks, KZERO->0
 *    and KZERO+256M->256M
 * 2) DBAT 2 maps the FPGA registers and DBAT 3 maps INTMEM directly,
 *    both uncached (under ucuconf a single BAT pair maps KZERO->0 instead)
 */
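/*
 * For reference (standard 603e BAT format, which the constants below
 * assume): the upper BAT word is BEPI | BL | Vs/Vp, where BL=0x7ff<<2
 * selects a 256M block (0x7f<<2 a 16M one) and the trailing 2 sets Vs,
 * supervisor-mode valid; the lower word is the physical base ORed with
 * WIMG/PP bits, built here from the PTE* flags in mem.h.
 */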
TEXT mmuinit0(SB), $0
	/* reset all the tlbs */
	MOVW	$64, R3
	MOVW	R3, CTR
	MOVW	$0, R4
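
	/*
	 * TLBIE invalidates by effective address; assuming BIT(n) is the
	 * usual Plan 9 big-endian 1<<(31-(n)), BIT(19) = 0x1000 steps the
	 * EA field that indexes the TLB, so the loop walks every
	 * congruence class and invalidates the whole TLB.
	 */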

tlbloop:
	TLBIE	R4
	SYNC
	ADD	$BIT(19), R4
	BDNZ	tlbloop
	TLBSYNC

#ifndef ucuconf
	/* BATs 0 and 1 cover memory from 0x00000000 to 0x20000000 */

	/* KZERO -> 0, IBAT and DBAT, 256 MB */
	MOVW	$(KZERO|(0x7ff<<2)|2), R3
	MOVW	$(PTEVALID|PTEWRITE), R4	/* PTEVALID => Cache coherency on */
	MOVW	R3, SPR(IBATU(0))
	MOVW	R4, SPR(IBATL(0))
	MOVW	R3, SPR(DBATU(0))
	MOVW	R4, SPR(DBATL(0))

	/* KZERO+256M -> 256M, IBAT and DBAT, 256 MB */
	ADD	$(1<<28), R3
	ADD	$(1<<28), R4
	MOVW	R3, SPR(IBATU(1))
	MOVW	R4, SPR(IBATL(1))
	MOVW	R3, SPR(DBATU(1))
	MOVW	R4, SPR(DBATL(1))

	/* FPGABASE -> FPGABASE, DBAT, 16 MB */
	MOVW	$(FPGABASE|(0x7f<<2)|2), R3
	MOVW	$(FPGABASE|PTEWRITE|PTEUNCACHED), R4	/* FPGA memory, don't cache */
	MOVW	R3, SPR(DBATU(2))
	MOVW	R4, SPR(DBATL(2))

	/* IBAT 2 unused */
	MOVW	R0, SPR(IBATU(2))
	MOVW	R0, SPR(IBATL(2))

	/* direct map last block, uncached, (not guarded, doesn't work for BAT), DBAT only */
	MOVW	$(INTMEM|(0x7ff<<2)|2), R3
	MOVW	$(INTMEM|PTEWRITE|PTEUNCACHED), R4	/* Don't set PTEVALID here */
	MOVW	R3, SPR(DBATU(3))
	MOVW	R4, SPR(DBATL(3))

	/* IBAT 3 unused */
	MOVW	R0, SPR(IBATU(3))
	MOVW	R0, SPR(IBATL(3))
#else /* ucuconf */
	/* BAT 2 covers memory from 0x00000000 to 0x10000000 */

	/* KZERO -> 0, IBAT2 and DBAT2, 256 MB */
	MOVW	$(KZERO|(0x7ff<<2)|2), R3
	MOVW	$(PTEVALID|PTEWRITE), R4	/* PTEVALID => Cache coherency on */
	MOVW	R3, SPR(DBATU(2))
	MOVW	R4, SPR(DBATL(2))
	MOVW	R3, SPR(IBATU(2))
	MOVW	R4, SPR(IBATL(2))
#endif /* ucuconf */

	/* enable MMU */
	MOVW	LR, R3
	OR	$KZERO, R3
	MOVW	R3, SPR(SRR0)		/* Stored PC for RFI instruction */
	MOVW	MSR, R4
	OR	$(MSR_IR|MSR_DR|MSR_RI|MSR_FP), R4
	MOVW	R4, SPR(SRR1)
	RFI				/* resume in kernel mode in caller */

	RETURN

TEXT kfpinit(SB), $0
	MOVFL	$0, FPSCR(7)
	MOVFL	$0xD, FPSCR(6)		/* VE, OE, ZE */
	MOVFL	$0, FPSCR(5)
	MOVFL	$0, FPSCR(3)
	MOVFL	$0, FPSCR(2)
	MOVFL	$0, FPSCR(1)
	MOVFL	$0, FPSCR(0)

	FMOVD	$4503601774854144.0, F27
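	/*
	 * 4503601774854144.0 = 2^52 + 2^31, presumably the usual bias
	 * constant for double/32-bit-integer conversion (a reading of
	 * the value; not documented in the original source)
	 */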
	FMOVD	$0.5, F29
	FSUB	F29, F29, F28
	FADD	F29, F29, F30
	FADD	F30, F30, F31
	FMOVD	F28, F0
	FMOVD	F28, F1
	FMOVD	F28, F2
	FMOVD	F28, F3
	FMOVD	F28, F4
	FMOVD	F28, F5
	FMOVD	F28, F6
	FMOVD	F28, F7
	FMOVD	F28, F8
	FMOVD	F28, F9
	FMOVD	F28, F10
	FMOVD	F28, F11
	FMOVD	F28, F12
	FMOVD	F28, F13
	FMOVD	F28, F14
	FMOVD	F28, F15
	FMOVD	F28, F16
	FMOVD	F28, F17
	FMOVD	F28, F18
	FMOVD	F28, F19
	FMOVD	F28, F20
	FMOVD	F28, F21
	FMOVD	F28, F22
	FMOVD	F28, F23
	FMOVD	F28, F24
	FMOVD	F28, F25
	FMOVD	F28, F26
	RETURN
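
/*
 * In splhi/splx below, RLWNM $0, Rx, $mask, Ry is rlwinm with a zero
 * rotate, i.e. Ry = Rx & mask; RLWMI inserts under the mask, so splx
 * restores only the MSR_EE (external interrupt enable) bit.
 */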

TEXT splhi(SB), $0
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))		/* save PC in m->splpc */
	MOVW	MSR, R3
	RLWNM	$0, R3, $~MSR_EE, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT splx(SB), $0
	/* fall through */

TEXT splxpc(SB), $0
	MOVW	LR, R31
	MOVW	R31, 4(R(MACH))		/* save PC in m->splpc */
	MOVW	MSR, R4
	RLWMI	$0, R3, $MSR_EE, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT spllo(SB), $0
	MOVW	MSR, R3
	OR	$MSR_EE, R3, R4
	SYNC
	MOVW	R4, MSR
	MSRSYNC
	RETURN

TEXT spldone(SB), $0
	RETURN

TEXT islo(SB), $0
	MOVW	MSR, R3
	RLWNM	$0, R3, $MSR_EE, R3
	RETURN

TEXT setlabel(SB), $-4
	MOVW	LR, R31
	MOVW	R1, 0(R3)
	MOVW	R31, 4(R3)
	MOVW	$0, R3
	RETURN

TEXT gotolabel(SB), $-4
	MOVW	4(R3), R31
	MOVW	R31, LR
	MOVW	0(R3), R1
	MOVW	$1, R3
	RETURN
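
/*
 * touser(sp): first entry to user mode; sets SRR0/SRR1 and RFIs to
 * UTZERO+32 (just past the a.out header mapped at UTZERO), with the
 * user stack pointer passed in R3.
 */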

TEXT touser(SB), $-4
	MOVW	$(UTZERO+32), R5	/* header appears in text */
	MOVW	$(MSR_EE|MSR_PR|MSR_IR|MSR_DR|MSR_RI), R4
	MOVW	R4, SPR(SRR1)
	MOVW	R3, R1
	MOVW	R5, SPR(SRR0)
	RFI
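
/*
 * dczap, dcflush and icflush share one pattern: round the start down to
 * a cache-line boundary, round the byte count up, load the line count
 * into CTR, then issue one cache operation (DCBI, DCBST or ICBI) per line.
 */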

TEXT dczap(SB), $-4			/* dczap(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcz1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
dcz0:
	DCBI	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	dcz0
dcz1:
	SYNC
	RETURN

TEXT dcflush(SB), $-4			/* dcflush(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	dcf1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
dcf0:	DCBST	(R5)
	ADD	$CACHELINESZ, R5
	BDNZ	dcf0
dcf1:
	SYNC
	RETURN

TEXT icflush(SB), $-4			/* icflush(virtaddr, count) */
	MOVW	n+4(FP), R4
	RLWNM	$0, R3, $~(CACHELINESZ-1), R5
	CMP	R4, $0
	BLE	icf1
	SUB	R5, R3
	ADD	R3, R4
	ADD	$(CACHELINESZ-1), R4
	SRAW	$CACHELINELOG, R4
	MOVW	R4, CTR
icf0:	ICBI	(R5)			/* invalidate the instruction cache */
	ADD	$CACHELINESZ, R5
	BDNZ	icf0
	ISYNC
icf1:
	RETURN
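
/*
 * tas and the atomics below rely on the lwarx/stwcx. (LWAR/STWCCC)
 * reservation pair: the conditional store succeeds only if the
 * reservation taken by the load still holds; otherwise CR0 is NE and
 * the sequence retries.  The leading DCBF works around the 603 bug
 * noted in the comments.
 */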

TEXT tas(SB), $0
	MOVW	R3, R4
	MOVW	$0xdead, R5
tas1:
	DCBF	(R4)			/* fix for 603x bug */
	SYNC
	LWAR	(R4), R3
	CMP	R3, $0
	BNE	tas0
	STWCCC	R5, (R4)
	BNE	tas1
	EIEIO
tas0:
	SYNC
	RETURN

TEXT _xinc(SB), $0			/* void _xinc(long *); */
	MOVW	R3, R4
xincloop:
	DCBF	(R4)			/* fix for 603x bug */
	LWAR	(R4), R3
	ADD	$1, R3
	STWCCC	R3, (R4)
	BNE	xincloop
	RETURN

TEXT _xdec(SB), $0			/* long _xdec(long *); */
	MOVW	R3, R4
xdecloop:
	DCBF	(R4)			/* fix for 603x bug */
	LWAR	(R4), R3
	ADD	$-1, R3
	STWCCC	R3, (R4)
	BNE	xdecloop
	RETURN

TEXT cmpswap(SB), $0			/* int cmpswap(long*, long, long) */
	MOVW	R3, R4			/* addr */
	MOVW	old+4(FP), R5
	MOVW	new+8(FP), R6
	DCBF	(R4)			/* fix for 603x bug? */
	LWAR	(R4), R3
	CMP	R3, R5
	BNE	fail
	STWCCC	R6, (R4)
	BNE	fail
	MOVW	$1, R3
	RETURN
fail:
	MOVW	$0, R3
	RETURN

TEXT tlbflushall(SB), $0
	MOVW	$TLBENTRIES, R3
	MOVW	R3, CTR
	MOVW	$0, R4
	ISYNC
tlbflushall0:
	TLBIE	R4
	SYNC
	ADD	$BIT(19), R4
	BDNZ	tlbflushall0
	TLBSYNC
	RETURN

TEXT tlbflush(SB), $0
	ISYNC
	TLBIE	R3
	SYNC
	TLBSYNC
	RETURN

TEXT gotopc(SB), $0
	MOVW	R3, CTR
	MOVW	LR, R31			/* for trace back */
	BR	(CTR)

/* On an imiss, we get here.  If we can resolve it, we do.
 * Otherwise take the real trap.  The code at the vector is
 *	MOVW	R0, SPR(SAVER0)	No point to this, of course
 *	MOVW	LR, R0
 *	MOVW	R0, SPR(SAVELR)
 *	BL	imiss(SB)		or dmiss, as the case may be
 *	BL	tlbvec(SB)
 */
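/*
 * HASH1 points at the primary PTEG; the loop below scans its 8-byte
 * PTEs (BY2PTEG bytes in all), matching word 0 of each against the
 * iCMP/DCMP compare value.  On a hit, word 1 (the physical address
 * word) is written to RPA and TLBLI/TLBLD loads the entry for the
 * missed address.
 */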
TEXT imiss(SB), $-4
	/* Statistics */
	MOVW	$MACHPADDR, R1
	MOVW	0xc(R1), R3		/* count m->tlbfault */
	ADD	$1, R3
	MOVW	R3, 0xc(R1)
	MOVW	0x10(R1), R3		/* count m->imiss */
	ADD	$1, R3
	MOVW	R3, 0x10(R1)

	/* Real work */
	MOVW	SPR(HASH1), R1		/* (phys) pointer into the hash table */
	ADD	$BY2PTEG, R1, R2	/* end pointer */
	MOVW	SPR(iCMP), R3		/* pattern to look for */
imiss1:
	MOVW	(R1), R0
	CMP	R3, R0
	BEQ	imiss2			/* found the entry */
	ADD	$8, R1
	CMP	R1, R2			/* test end of loop */
	BNE	imiss1			/* Loop */
	/* Failed to find an entry; take the full trap */
	MOVW	SPR(SRR1), R1
	MTCRF(1, 0x80)			/* restore CR0 bits (they're auto saved in SRR1) */
	RETURN
imiss2:
	/* Found the entry */
	MOVW	4(R1), R2		/* Phys addr */
	MOVW	R2, SPR(RPA)
	MOVW	SPR(IMISS), R3
	TLBLI(3)

	/* Restore Registers */
	MOVW	SPR(SRR1), R1		/* Restore the CR0 field of the CR register from SRR1 */
	MTCRF(1, 0x80)
	MOVW	SPR(SAVELR), R0
	MOVW	R0, LR
	RFI

/* On a data load or store miss, we get here.  If we can resolve it, we do.
 * Otherwise take the real trap
 */
TEXT dmiss(SB), $-4
	/* Statistics */
	MOVW	$MACHPADDR, R1
	MOVW	0xc(R1), R3		/* count m->tlbfault */
	ADD	$1, R3
	MOVW	R3, 0xc(R1)
	MOVW	0x14(R1), R3		/* count m->dmiss */
	ADD	$1, R3
	MOVW	R3, 0x14(R1)
	/* Real work */
	MOVW	SPR(HASH1), R1		/* (phys) pointer into the hash table */
	ADD	$BY2PTEG, R1, R2	/* end pointer */
	MOVW	SPR(DCMP), R3		/* pattern to look for */
dmiss1:
	MOVW	(R1), R0
	CMP	R3, R0
	BEQ	dmiss2			/* found the entry */
	ADD	$8, R1
	CMP	R1, R2			/* test end of loop */
	BNE	dmiss1			/* Loop */
	/* Failed to find an entry; take the full trap */
	MOVW	SPR(SRR1), R1
	MTCRF(1, 0x80)			/* restore CR0 bits (they're auto saved in SRR1) */
	RETURN
dmiss2:
	/* Found the entry */
	MOVW	4(R1), R2		/* Phys addr */
	MOVW	R2, SPR(RPA)
	MOVW	SPR(DMISS), R3
	TLBLD(3)
	/* Restore Registers */
	MOVW	SPR(SRR1), R1		/* Restore the CR0 field of the CR register from SRR1 */
	MTCRF(1, 0x80)
	MOVW	SPR(SAVELR), R0
	MOVW	R0, LR
	RFI

/*
 * When a trap sets the TGPR bit (TLB miss traps do this),
 * registers get remapped: R0-R31 are temporarily inaccessible,
 * and Temporary Registers TR0-TR3 are mapped onto R0-R3.
 * While this bit is set, R4-R31 cannot be used.
 * The code at the vector has executed this code before
 * coming to tlbvec:
 *	MOVW	R0, SPR(SAVER0)	No point to this, of course
 *	MOVW	LR, R0
 *	MOVW	R0, SPR(SAVELR)
 *	BL	tlbvec(SB)
 * SAVER0 can be reused.  We're not interested in the value of TR0
 */
TEXT tlbvec(SB), $-4
	MOVW	MSR, R1
	RLWNM	$0, R1, $~MSR_TGPR, R1	/* Clear the dreaded TGPR bit in the MSR */
	SYNC
	MOVW	R1, MSR
	MSRSYNC
	/* Now the GPRs are what they're supposed to be, save R0 again */
	MOVW	R0, SPR(SAVER0)
	/* Fall through to trapvec */

/*
 * traps force memory mapping off.
 * the following code has been executed at the exception
 * vector location
 *	MOVW	R0, SPR(SAVER0)
 *	MOVW	LR, R0
 *	MOVW	R0, SPR(SAVELR)
 *	bl	trapvec(SB)
 *
 */
TEXT trapvec(SB), $-4
	MOVW	LR, R0
	MOVW	R1, SPR(SAVER1)
	MOVW	R0, SPR(SAVEXX)		/* vector */

	/* did we come from user space */
	MOVW	SPR(SRR1), R0
	MOVW	CR, R1
	MOVW	R0, CR
	BC	4, 17, ktrap

	/* switch to kernel stack */
	MOVW	R1, CR
	MOVW	$MACHPADDR, R1		/* PADDR(m->) */
	MOVW	8(R1), R1		/* m->proc */
	RLWNM	$0, R1, $~KZERO, R1	/* PADDR(m->proc) */
	MOVW	8(R1), R1		/* m->proc->kstack */
	RLWNM	$0, R1, $~KZERO, R1	/* PADDR(m->proc->kstack) */
	ADD	$(KSTACK-UREGSIZE), R1	/* make room on stack */

	BL	saveureg(SB)
	BL	trap(SB)
	BR	restoreureg

ktrap:
	MOVW	R1, CR
	MOVW	SPR(SAVER1), R1
	RLWNM	$0, R1, $~KZERO, R1	/* set stack pointer */
	SUB	$UREGSPACE, R1

	BL	saveureg(SB)		/* addressed relative to PC */
	BL	trap(SB)
	BR	restoreureg

/*
 * enter with stack set and mapped.
 * on return, SB (R2) has been set, and R3 has the Ureg*,
 * the MMU has been re-enabled, kernel text and PC are in KSEG,
 * R(MACH) has been set, and R0 contains 0.
 *
 */
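/*
 * Ureg layout implied by the stores below (offsets from R1; the Ureg*
 * passed to trap() is R1+8): 8 cause, 12 status (SRR1), 16 pc (SRR0),
 * 20 pad, 24 lr, 28 cr, 32 xer, 36 ctr, 40 r0, 44 sp, 48-164 r2-r31,
 * then the 603e tlb-miss registers at 168-196 (dcmp, icmp, dmiss,
 * imiss, hash1, hash2, dar, dsisr).
 */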
TEXT saveureg(SB), $-4
/*
 * save state
 */
	MOVMW	R2, 48(R1)		/* save r2 .. r31 in 48(R1) .. 164(R1) */
	MOVW	$MACHPADDR, R(MACH)	/* PADDR(m->) */
	MOVW	8(R(MACH)), R(USER)	/* up-> */
	MOVW	$MACHADDR, R(MACH)	/* m-> */
	MOVW	SPR(SAVER1), R4
	MOVW	R4, 44(R1)
	MOVW	SPR(SAVER0), R5
	MOVW	R5, 40(R1)
	MOVW	CTR, R6
	MOVW	R6, 36(R1)
	MOVW	XER, R4
	MOVW	R4, 32(R1)
	MOVW	CR, R5
	MOVW	R5, 28(R1)
	MOVW	SPR(SAVELR), R6		/* LR */
	MOVW	R6, 24(R1)
	/* pad at 20(R1) */
	MOVW	SPR(SRR0), R0
	MOVW	R0, 16(R1)		/* old PC */
	MOVW	SPR(SRR1), R0
	MOVW	R0, 12(R1)		/* old status */
	MOVW	SPR(SAVEXX), R0
	MOVW	R0, 8(R1)		/* cause/vector */
	MOVW	SPR(DCMP), R0
	MOVW	R0, (160+8)(R1)
	MOVW	SPR(iCMP), R0
	MOVW	R0, (164+8)(R1)
	MOVW	SPR(DMISS), R0
	MOVW	R0, (168+8)(R1)
	MOVW	SPR(IMISS), R0
	MOVW	R0, (172+8)(R1)
	MOVW	SPR(HASH1), R0
	MOVW	R0, (176+8)(R1)
	MOVW	SPR(HASH2), R0
	MOVW	R0, (180+8)(R1)
	MOVW	SPR(DAR), R0
	MOVW	R0, (184+8)(R1)
	MOVW	SPR(DSISR), R0
	MOVW	R0, (188+8)(R1)
	ADD	$8, R1, R3		/* Ureg* */
	OR	$KZERO, R3		/* fix ureg */
	STWCCC	R3, (R1)		/* break any pending reservations */
	MOVW	$0, R0			/* compiler/linker expect R0 to be zero */
	MOVW	$setSB(SB), R2		/* SB register */

	MOVW	MSR, R5
	OR	$(MSR_IR|MSR_DR|MSR_FP|MSR_RI), R5	/* enable MMU */
	MOVW	R5, SPR(SRR1)
	MOVW	LR, R31
	OR	$KZERO, R31		/* return PC in KSEG0 */
	MOVW	R31, SPR(SRR0)
	OR	$KZERO, R1		/* fix stack pointer */
	RFI				/* returns to trap handler */

/*
 * restore state from Ureg and return from trap/interrupt
 */
TEXT forkret(SB), $0
	BR	restoreureg

restoreureg:
	MOVMW	48(R1), R2		/* restore r2 through r31 */
	/* defer R1 */
	MOVW	40(R1), R0
	MOVW	R0, SPR(SAVER0)		/* resave saved R0 */
	MOVW	36(R1), R0
	MOVW	R0, CTR
	MOVW	32(R1), R0
	MOVW	R0, XER
	MOVW	28(R1), R0
	MOVW	R0, CR			/* Condition register */
	MOVW	24(R1), R0
	MOVW	R0, LR
	/* pad, skip */
	MOVW	16(R1), R0
	MOVW	R0, SPR(SRR0)		/* old PC */
	MOVW	12(R1), R0
	MOVW	R0, SPR(SRR1)		/* old MSR */
	/* cause, skip */
	MOVW	44(R1), R1		/* old SP */
	MOVW	SPR(SAVER0), R0
	RFI

TEXT getpvr(SB), $0
	MOVW	SPR(PVR), R3
	RETURN

TEXT getdec(SB), $0
	MOVW	SPR(DEC), R3
	RETURN

TEXT putdec(SB), $0
	MOVW	R3, SPR(DEC)
	RETURN

TEXT getdar(SB), $0
	MOVW	SPR(DAR), R3
	RETURN

TEXT getdsisr(SB), $0
	MOVW	SPR(DSISR), R3
	RETURN

TEXT getmsr(SB), $0
	MOVW	MSR, R3
	RETURN

TEXT putmsr(SB), $0
	MOVW	R3, MSR
	MSRSYNC
	RETURN

TEXT putsdr1(SB), $0
	SYNC
	MOVW	R3, SPR(SDR1)
	ISYNC
	RETURN

TEXT putsr(SB), $0
	MOVW	4(FP), R4
	SYNC
	MOVW	R4, SEG(R3)
	MSRSYNC
	RETURN

TEXT getsr(SB), $0
	MOVW	SEG(R3), R3
	RETURN

TEXT gethid0(SB), $0
	MOVW	SPR(HID0), R3
	RETURN

TEXT puthid0(SB), $0
	SYNC
	ISYNC
	MOVW	R3, SPR(HID0)
	SYNC
	RETURN

TEXT gethid1(SB), $0
	MOVW	SPR(HID1), R3
	RETURN

TEXT gethid2(SB), $0
	MOVW	SPR(HID2), R3
	RETURN

TEXT puthid2(SB), $0
	MOVW	R3, SPR(HID2)
	RETURN

TEXT eieio(SB), $0
	EIEIO
	RETURN

TEXT sync(SB), $0
	SYNC
	RETURN

/* Power PC 603e specials */
TEXT getimiss(SB), $0
	MOVW	SPR(IMISS), R3
	RETURN

TEXT geticmp(SB), $0
	MOVW	SPR(iCMP), R3
	RETURN

TEXT puticmp(SB), $0
	MOVW	R3, SPR(iCMP)
	RETURN

TEXT getdmiss(SB), $0
	MOVW	SPR(DMISS), R3
	RETURN

TEXT getdcmp(SB), $0
	MOVW	SPR(DCMP), R3
	RETURN

TEXT putdcmp(SB), $0
	MOVW	R3, SPR(DCMP)
	RETURN

TEXT getsdr1(SB), $0
	MOVW	SPR(SDR1), R3
	RETURN

TEXT gethash1(SB), $0
	MOVW	SPR(HASH1), R3
	RETURN

TEXT puthash1(SB), $0
	MOVW	R3, SPR(HASH1)
	RETURN

TEXT gethash2(SB), $0
	MOVW	SPR(HASH2), R3
	RETURN

TEXT puthash2(SB), $0
	MOVW	R3, SPR(HASH2)
	RETURN

TEXT getrpa(SB), $0
	MOVW	SPR(RPA), R3
	RETURN

TEXT putrpa(SB), $0
	MOVW	R3, SPR(RPA)
	RETURN

TEXT tlbli(SB), $0
	TLBLI(3)
	ISYNC
	RETURN

TEXT tlbld(SB), $0
	SYNC
	TLBLD(3)
	ISYNC
	RETURN

TEXT getsrr1(SB), $0
	MOVW	SPR(SRR1), R3
	RETURN

TEXT putsrr1(SB), $0
	MOVW	R3, SPR(SRR1)
	RETURN

TEXT fpsave(SB), $0
	FMOVD	F0, (0*8)(R3)
	FMOVD	F1, (1*8)(R3)
	FMOVD	F2, (2*8)(R3)
	FMOVD	F3, (3*8)(R3)
	FMOVD	F4, (4*8)(R3)
	FMOVD	F5, (5*8)(R3)
	FMOVD	F6, (6*8)(R3)
	FMOVD	F7, (7*8)(R3)
	FMOVD	F8, (8*8)(R3)
	FMOVD	F9, (9*8)(R3)
	FMOVD	F10, (10*8)(R3)
	FMOVD	F11, (11*8)(R3)
	FMOVD	F12, (12*8)(R3)
	FMOVD	F13, (13*8)(R3)
	FMOVD	F14, (14*8)(R3)
	FMOVD	F15, (15*8)(R3)
	FMOVD	F16, (16*8)(R3)
	FMOVD	F17, (17*8)(R3)
	FMOVD	F18, (18*8)(R3)
	FMOVD	F19, (19*8)(R3)
	FMOVD	F20, (20*8)(R3)
	FMOVD	F21, (21*8)(R3)
	FMOVD	F22, (22*8)(R3)
	FMOVD	F23, (23*8)(R3)
	FMOVD	F24, (24*8)(R3)
	FMOVD	F25, (25*8)(R3)
	FMOVD	F26, (26*8)(R3)
	FMOVD	F27, (27*8)(R3)
	FMOVD	F28, (28*8)(R3)
	FMOVD	F29, (29*8)(R3)
	FMOVD	F30, (30*8)(R3)
	FMOVD	F31, (31*8)(R3)
	MOVFL	FPSCR, F0
	FMOVD	F0, (32*8)(R3)
	RETURN

TEXT fprestore(SB), $0
	FMOVD	(32*8)(R3), F0
	MOVFL	F0, FPSCR
	FMOVD	(0*8)(R3), F0
	FMOVD	(1*8)(R3), F1
	FMOVD	(2*8)(R3), F2
	FMOVD	(3*8)(R3), F3
	FMOVD	(4*8)(R3), F4
	FMOVD	(5*8)(R3), F5
	FMOVD	(6*8)(R3), F6
	FMOVD	(7*8)(R3), F7
	FMOVD	(8*8)(R3), F8
	FMOVD	(9*8)(R3), F9
	FMOVD	(10*8)(R3), F10
	FMOVD	(11*8)(R3), F11
	FMOVD	(12*8)(R3), F12
	FMOVD	(13*8)(R3), F13
	FMOVD	(14*8)(R3), F14
	FMOVD	(15*8)(R3), F15
	FMOVD	(16*8)(R3), F16
	FMOVD	(17*8)(R3), F17
	FMOVD	(18*8)(R3), F18
	FMOVD	(19*8)(R3), F19
	FMOVD	(20*8)(R3), F20
	FMOVD	(21*8)(R3), F21
	FMOVD	(22*8)(R3), F22
	FMOVD	(23*8)(R3), F23
	FMOVD	(24*8)(R3), F24
	FMOVD	(25*8)(R3), F25
	FMOVD	(26*8)(R3), F26
	FMOVD	(27*8)(R3), F27
	FMOVD	(28*8)(R3), F28
	FMOVD	(29*8)(R3), F29
	FMOVD	(30*8)(R3), F30
	FMOVD	(31*8)(R3), F31
	RETURN
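
/*
 * Cache enable on the 603e: between SYNCs, store HID0 first with the
 * flash-invalidate bit set (HID_DCFI/HID_ICFI) and then with it clear,
 * leaving only the enable bit.  icacheenb performs both stores; in
 * dcacheenb the invalidate store is commented out.
 */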

TEXT dcacheenb(SB), $0
	SYNC
	MOVW	SPR(HID0), R4		/* Get HID0 and clear unwanted bits */
	RLWNM	$0, R4, $~(HID_DLOCK), R4
	MOVW	$(HID_DCFI|HID_DCE), R5
	OR	R4, R5
	MOVW	$HID_DCE, R3
	OR	R4, R3
	SYNC
//	MOVW	R5, SPR(HID0)		/* Cache enable and flash invalidate */
	MOVW	R3, SPR(HID0)		/* Cache enable */
	SYNC
	RETURN

TEXT icacheenb(SB), $0
	SYNC
	MOVW	SPR(HID0), R4		/* Get HID0 and clear unwanted bits */
	RLWNM	$0, R4, $~(HID_ILOCK), R4
	MOVW	$(HID_ICFI|HID_ICE), R5
	OR	R4, R5
	MOVW	$HID_ICE, R3
	OR	R4, R3
	SYNC
	MOVW	R5, SPR(HID0)		/* Cache enable and flash invalidate */
	MOVW	R3, SPR(HID0)		/* Cache enable */
	SYNC
	RETURN

#ifdef ucuconf
TEXT getpll(SB), $0
	MOVW	SPR(1009), R3
	ISYNC
	RETURN

TEXT getl2pm(SB), $0
	MOVW	SPR(1016), R3
	RETURN

TEXT getl2cr(SB), $0
	MOVW	SPR(1017), R3
	RETURN

TEXT putl2cr(SB), $0
	MOVW	R3, SPR(1017)
	RETURN

TEXT dcachedis(SB), $0
	SYNC
/*	MOVW	SPR(HID0), R4
	RLWNM	$0, R4, $~(HID_DCE), R4
	MOVW	R4, SPR(HID0)		/* L1 Cache disable */

	MOVW	SPR(1017), R4
	RLWNM	$0, R4, $~(0x80000000), R4
	MOVW	R4, SPR(1017)		/* L2 Cache disable */

	SYNC
	RETURN

TEXT l2disable(SB), $0
	SYNC
	MOVW	SPR(1017), R4
	RLWNM	$0, R4, $~(0x80000000), R4
	MOVW	R4, SPR(1017)		/* L2 Cache disable */
	SYNC
	RETURN

TEXT getbats(SB), $0
	MOVW	SPR(DBATU(0)), R4
	MOVW	R4, 0(R3)
	MOVW	SPR(DBATL(0)), R4
	MOVW	R4, 4(R3)
	MOVW	SPR(IBATU(0)), R4
	MOVW	R4, 8(R3)
	MOVW	SPR(IBATL(0)), R4
	MOVW	R4, 12(R3)
	MOVW	SPR(DBATU(1)), R4
	MOVW	R4, 16(R3)
	MOVW	SPR(DBATL(1)), R4
	MOVW	R4, 20(R3)
	MOVW	SPR(IBATU(1)), R4
	MOVW	R4, 24(R3)
	MOVW	SPR(IBATL(1)), R4
	MOVW	R4, 28(R3)
	MOVW	SPR(DBATU(2)), R4
	MOVW	R4, 32(R3)
	MOVW	SPR(DBATL(2)), R4
	MOVW	R4, 36(R3)
	MOVW	SPR(IBATU(2)), R4
	MOVW	R4, 40(R3)
	MOVW	SPR(IBATL(2)), R4
	MOVW	R4, 44(R3)
	MOVW	SPR(DBATU(3)), R4
	MOVW	R4, 48(R3)
	MOVW	SPR(DBATL(3)), R4
	MOVW	R4, 52(R3)
	MOVW	SPR(IBATU(3)), R4
	MOVW	R4, 56(R3)
	MOVW	SPR(IBATL(3)), R4
	MOVW	R4, 60(R3)
	RETURN

TEXT setdbat0(SB), $0
	MOVW	0(R3), R4
	MOVW	R4, SPR(DBATU(0))
	MOVW	4(R3), R4
	MOVW	R4, SPR(DBATL(0))
	RETURN
#endif /* ucuconf */

TEXT mmudisable(SB), $0
	/* disable MMU */
	MOVW	LR, R4
	MOVW	$KZERO, R5
	ANDN	R5, R4
	MOVW	R4, SPR(SRR0)		/* Stored PC for RFI instruction */

	MOVW	MSR, R4
	MOVW	$(MSR_IR|MSR_DR|MSR_RI|MSR_FP), R5
	ANDN	R5, R4
	MOVW	R4, SPR(SRR1)

	MOVW	SPR(HID0), R4		/* Get HID0 and clear unwanted bits */
	MOVW	$(HID_ICE|HID_DCE), R5
	ANDN	R5, R4
	MOVW	R4, SPR(HID0)		/* Cache disable */
	RFI				/* resume caller with MMU off */
	RETURN
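
/*
 * kreboot(entry): mmudisable RFIs back here with the MMU and caches
 * off; we then jump to the physical entry address passed in R3.
 */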

TEXT kreboot(SB), $0
	BL	mmudisable(SB)
	MOVW	R3, LR
	RETURN
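
/*
 * mul64fract(uvlong *r, uvlong a, uvlong b), apparently returning the
 * middle 64 bits of the 128-bit product, i.e. *r = (a*b) >> 32.
 * With a = (a1<<32)+a0 and b = (b1<<32)+b0:
 *	a*b = (a1*b1 << 64) + ((a1*b0 + a0*b1) << 32) + a0*b0
 * The code below sums exactly the terms that land in bits 32..95;
 * lo(a0*b0) lies wholly below bit 32 and is dropped.  The result is
 * stored big-endian: high word at 0(R3), low word at 4(R3).
 */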

TEXT mul64fract(SB), $0
	MOVW	a0+8(FP), R9
	MOVW	a1+4(FP), R10
	MOVW	b0+16(FP), R4
	MOVW	b1+12(FP), R5

	MULLW	R10, R5, R13		/* c2 = lo(a1*b1) */

	MULLW	R10, R4, R12		/* c1 = lo(a1*b0) */
	MULHWU	R10, R4, R7		/* hi(a1*b0) */
	ADD	R7, R13			/* c2 += hi(a1*b0) */

	MULLW	R9, R5, R6		/* lo(a0*b1) */
	MULHWU	R9, R5, R7		/* hi(a0*b1) */
	ADDC	R6, R12			/* c1 += lo(a0*b1) */
	ADDE	R7, R13			/* c2 += hi(a0*b1) + carry */

	MULHWU	R9, R4, R7		/* hi(a0*b0) */
	ADDC	R7, R12			/* c1 += hi(a0*b0) */
	ADDE	R0, R13			/* c2 += carry */

	MOVW	R12, 4(R3)
	MOVW	R13, 0(R3)
	RETURN