/*
 * (Subversion repository-viewer boilerplate removed; the scraped source follows.)
 * Plan 9-style SPARC kernel low-level assembly with shared C/assembler definitions.
 */
/*
 * Memory and machine-specific definitions.  Used in C and assembler.
 */

/*
 * Sizes
 */

#define	BI2BY		8			/* bits per byte */
#define BI2WD		32			/* bits per word */
#define	BY2WD		4			/* bytes per word */
#define	BY2PG		4096			/* bytes per page */
#define	WD2PG		(BY2PG/BY2WD)		/* words per page */
#define	PGSHIFT		12			/* log(BY2PG) */
/* round s up to the next page boundary */
#define PGROUND(s)	(((s)+(BY2PG-1))&~(BY2PG-1))

#define	MAXMACH		1			/* max # cpus system can run */

/*
 * Time
 */
#define	HZ		20			/* clock frequency */
#define	MS2HZ		(1000/HZ)		/* millisec per clock tick */
#define	TK2SEC(t)	((t)/HZ)		/* ticks to seconds */
#define	TK2MS(t)	((((ulong)(t))*1000)/HZ)	/* ticks to milliseconds */
#define	MS2TK(t)	((((ulong)(t))*HZ)/1000)	/* milliseconds to ticks */
/*
 * PSR bits (SPARC v8 Processor State Register)
 */
#define	PSREC		0x00002000	/* EC: coprocessor enable */
#define	PSREF		0x00001000	/* EF: floating-point enable */
#define PSRSUPER	0x00000080	/* S: supervisor mode */
#define PSRPSUPER	0x00000040	/* PS: previous supervisor mode (mode at trap time) */
#define	PSRET		0x00000020	/* ET: trap enable */
/*
 * Interrupt priority level (PIL field, PSR bits 8-11).
 * Argument parenthesized so expansion is safe for any expression.
 */
#define SPL(n)		((n)<<8)
/*
 * Magic registers
 */

#define	MACH		6		/* R6 is m-> */
#define	USER		5		/* R5 is u-> */

/*
 * Fundamental addresses
 */

#define	USERADDR	0xE0000000
/* Ureg sits at the top of the user page: 32 registers + 6 special words (see traplink) */
#define	UREGADDR	(USERADDR+BY2PG-((32+6)*BY2WD))
#define	BOOTSTACK	(KTZERO-0*BY2PG)
#define	TRAPS		(KTZERO-2*BY2PG)

/*
 * MMU
 */

#define	VAMASK		0x3FFFFFFF
#define	NPMEG		(1<<12)
#define	BY2SEGM		(1<<18)		/* bytes per segment */
#define	PG2SEGM		(1<<6)		/* pages per segment */
#define	NTLBPID		(1+NCONTEXT)	/* TLBPID 0 is unallocated */
#define	NCONTEXT	8
#define	CONTEXT		0x30000000	/* in ASI 2 */
/*
 * MMU regions
 */
#define	INVALIDSEGM	0xFFFC0000	/* highest seg of VA reserved as invalid */
#define	INVALIDPMEG	0x7F
#define	SCREENSEGM	0xFFF80000
#define	SCREENPMEG	0x7E
#define	ROMSEGM		0xFFE80000
#define	ROMEND		0xFFEA0000
#define	PG2ROM		((ROMEND-ROMSEGM)/BY2PG)	/* pages occupied by ROM */
#define	IOSEGM0		ROMSEGM		/* see mmuinit() */
#define	NIOSEGM		((SCREENSEGM-ROMSEGM)/BY2SEGM)	/* # of I/O segments */
#define	IOPMEG0		(SCREENPMEG-NIOSEGM)
#define	IOSEGM		ROMEND
#define	IOEND		SCREENSEGM
#define	TOPPMEG		IOPMEG0		/* first pmeg reserved above general use */
/*
 * MMU entries (page table entry bits)
 */
#define	PTEVALID	(1<<31)
#define	PTERONLY	(0<<30)
#define	PTEWRITE	(1<<30)
#define	PTEKERNEL	(1<<29)
#define	PTENOCACHE	(1<<28)
#define	PTEMAINMEM	(0<<26)		/* space selector: main memory */
#define	PTEIO		(1<<26)		/* space selector: I/O space */
#define	PTEACCESS	(1<<25)		/* referenced */
#define	PTEMODIFY	(1<<24)		/* dirty */
#define PTEUNCACHED	0
#define PTEMAPMEM	(1024*1024)	/* memory mapped by one page table */
#define	PTEPERTAB	(PTEMAPMEM/BY2PG)	/* PTEs per page table */
#define SEGMAPSIZE	16

#define	INVALIDPTE	0
/*
 * Physical page number from physical address.
 * Argument parenthesized so expansion is safe for any expression.
 */
#define	PPN(pa)		(((pa)>>12)&0xFFFF)
/*
 * Weird addresses in various ASI's
 */
#define	CACHETAGS	0x80000000		/* ASI 2 */
#define	CACHEDATA	0x90000000		/* ASI 2 */
#define	SER		0x60000000		/* ASI 2: sync error register */
#define	SEVAR		0x60000004		/* ASI 2: sync error virtual address */
#define	ASER		0x60000008		/* ASI 2: async error register */
#define	ASEVAR		0x6000000C		/* ASI 2: async error virtual address */
#define	ENAB		0x40000000		/* ASI 2: system enable register */
#define	ENABCACHE	0x10			/* bit in ENAB: turn cache on */
#define	ENABRESET	0x04			/* bit in ENAB: reset */
/*
 * Virtual addresses.
 * Macro arguments parenthesized so expansion is safe for any expression.
 */
#define	VTAG(va)	(((va)>>22)&0x03F)	/* cache tag index */
#define	VPN(va)		(((va)>>13)&0x1FF)	/* virtual page number (pairs of 4K pages) */

#define	PARAM		((char*)0x40500000)
#define	TLBFLUSH_	0x01
/*
 * Address spaces
 */

#define	UZERO	0x00000000		/* base of user address space */
#define	UTZERO	(UZERO+BY2PG)		/* first address in user text */
#define	TSTKTOP	0x10000000		/* end of new stack in sysexec */
#define TSTKSIZ 32
#define	USTKTOP	(TSTKTOP-TSTKSIZ*BY2PG)	/* byte just beyond user stack */
#define	KZERO	0xE0000000		/* base of kernel address space */
#define	KTZERO	(KZERO+4*BY2PG)		/* first address in kernel text */
#define	USTKSIZE	(4*1024*1024)	/* size of user stack */

#define	MACHSIZE	4096		/* size of the per-processor Mach structure */

/* true if x lies in the kernel (KZERO) address range */
#define isphys(x) (((ulong)(x)&0xF0000000) == KZERO)

/* PSR value for kernel mode: spl0, FPU enabled, supervisor */
#define	SYSPSR	(SPL(0x0)|PSREF|PSRSUPER|0)
/* three no-ops: settle delayed-write registers (PSR/TBR/WIM) before they are used */
#define	NOOP	OR R0, R0; OR R0, R0; OR R0, R0
/*
 * Kernel entry point: running physical in segment 0; establish the
 * virtual mapping for KZERO, then jump to startvirt at its virtual address.
 */
TEXT	start(SB), $-4

	/* get virtual, fast */
	/* we are executing in segment 0, mapped to pmeg 0. stack is there too */
	/* get virtual by mapping segment(KZERO) to pmeg 0., and next to 1 */
	MOVW	$KZERO, R7
	MOVB	R0, (R7, 3)		/* alternate-space store: segment map for KZERO <- pmeg 0 */
	MOVW	$(KZERO+BY2SEGM), R7
	MOVW	$1, R8
	MOVB	R8, (R7, 3)		/* segment map for next segment <- pmeg 1 */
	/* now mapped correctly.  jmpl to where we want to be */
	MOVW	$setSB(SB), R2		/* establish the static base register */
	MOVW	$startvirt(SB), R7
	JMPL	(R7)
	MOVW	$_mul(SB), R0	/* touch _mul etc.; doesn't need to execute */
	RETURN			/* can't get here */
/*
 * Continue startup at the kernel's virtual address: set the boot stack,
 * initialize PSR, FSR and the FP registers, then call main().
 */
TEXT	startvirt(SB), $-4

	MOVW	$BOOTSTACK, R1		/* kernel boot stack */

	MOVW	$(SPL(0xF)|PSREF|PSRSUPER), R7
	MOVW	R7, PSR			/* supervisor, FPU on, all interrupts masked */

	MOVW	$(0x35<<22), R7		/* NVM OFM DZM AU */
	MOVW	R7, fsr+0(SB)
	MOVW	fsr+0(SB), FSR		/* FSR is loaded from memory, so bounce via fsr */
	FMOVD	$0.5, F26		/* 0.5 -> F26 */
	FSUBD	F26, F26, F24		/* 0.0 -> F24 */
	FADDD	F26, F26, F28		/* 1.0 -> F28 */
	FADDD	F28, F28, F30		/* 2.0 -> F30 */

	/* initialize the remaining FP registers to 0.0 */
	FMOVD	F24, F0
	FMOVD	F24, F2
	FMOVD	F24, F4
	FMOVD	F24, F6
	FMOVD	F24, F8
	FMOVD	F24, F10
	FMOVD	F24, F12
	FMOVD	F24, F14
	FMOVD	F24, F16
	FMOVD	F24, F18
	FMOVD	F24, F20
	FMOVD	F24, F22

	MOVW	$mach0(SB), R(MACH)	/* m-> for cpu 0 */
/*	MOVW	$0x8, R7 /**/
	MOVW	R0, WIM			/* clear the window invalid mask */
	JMPL	main(SB)
	MOVW	(R0), R0		/* main returned: fault deliberately */
	RETURN
/*
 * Atomic test-and-set of the byte lock pointed to by R7;
 * old value is returned in R7 (LDSTUB sets the byte to 0xFF).
 */
TEXT	swap1(SB), $0

	TAS	(R7), R7		/* LDSTUB, thank you ken */
	RETURN
/*
 * Atomic exchange: swap 1 into the word at (arg in R7);
 * previous value returned in R7.
 */
TEXT	swap1_should_work(SB), $0

	MOVW	R7, R8			/* R8 = lock address */
	MOVW	$1, R7
	SWAP	(R8), R7		/* exchange R7 with *R8 atomically */
	RETURN
/*
 * Non-atomic test-and-set fallback done with traps disabled.
 * NOTE(review): the store goes through R8, which is never loaded from the
 * lock address (R7) in this routine — verify against callers before use.
 */
TEXT	swap1x(SB), $0

	MOVW	PSR, R9			/* save current PSR */
	MOVW	R9, R10
	AND	$~PSRET, R10		/* BUG: book says this is buggy */
	MOVW	R10, PSR		/* traps off */
	NOOP
	MOVW	(R7), R7		/* read old lock value */
	CMP	R7, R0
	BNE	was1			/* already held; return nonzero */
	MOVW	$1, R10
	MOVW	R10, (R8)		/* grab the lock */
was1:
	MOVW	R9, PSR			/* restore original PSR */
	RETURN
/*
 * Enable traps (lower priority); returns the previous PSR in R7
 * for a later splx().
 */
TEXT	spllo(SB), $0

	MOVW	PSR, R7			/* old PSR is the return value */
	MOVW	R7, R10
	OR	$PSRET, R10		/* set trap-enable */
	MOVW	R10, PSR
	NOOP
	RETURN
/*
 * Disable traps (raise priority); returns the previous PSR in R7
 * for a later splx().
 */
TEXT	splhi(SB), $0

	MOVW	R15, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	PSR, R7			/* old PSR is the return value */
	MOVW	R7, R10
	AND	$~PSRET, R10	/* BUG: book says this is buggy */
	MOVW	R10, PSR
	NOOP
	RETURN
/*
 * Restore a PSR previously returned by spllo()/splhi() (arg in R7).
 */
TEXT	splx(SB), $0

	MOVW	R15, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	R7, PSR		/* BUG: book says this is buggy */
	NOOP
	RETURN
/*
 * Marker for the end of the spl routines (no-op on this machine).
 */
TEXT	spldone(SB), $0

	RETURN
/*
 * First entry to user mode: user stack pointer is the argument (R7).
 * Starts execution at UTZERO+32, just past the a.out header in text.
 */
TEXT	touser(SB), $0
	MOVW	$(SYSPSR&~PSREF), R8	/* kernel PSR but FPU disabled for the user */
	MOVW	R8, PSR
	NOOP

	MOVW	R7, R1			/* user SP */
	SAVE	R0, R0			/* RETT is implicit RESTORE */
	MOVW	$(UTZERO+32), R7	/* PC; header appears in text */
	MOVW	$(UTZERO+32+4), R8	/* nPC */
	RETT	R7, R8
/*
 * Return from a note handler: restore the full Ureg state
 * via the shared trap-return path.
 */
TEXT	rfnote(SB), $0

	MOVW	R7, R1			/* 1st arg is &uregpointer */
	ADD	$4, R1			/* point at ureg */
	JMP	restore
/*
 * Common trap/interrupt entry, reached from the vector code.
 * Builds a Ureg (32 registers + 6 special words) on the kernel stack,
 * calls trap(Ureg*), then restores state and returns with RETT.
 */
TEXT	traplink(SB), $-4

	/* R8 to R23 are free to play with */
	/* R17 contains PC, R18 contains nPC */
	/* R19 has PSR loaded from vector code */

	ANDCC	$PSRPSUPER, R19, R0	/* supervisor mode at trap time? */
	BE	usertrap

kerneltrap:
	/*
	 * Interrupt or fault from kernel
	 */
	ANDN	$7, R1, R20			/* dbl aligned */
	MOVW	R1, (0-(4*(32+6))+(4*1))(R20)	/* save R1=SP */
	/* really clumsy: store these in Ureg so can be restored below */
	MOVW	R2, (0-(4*(32+6))+(4*2))(R20)	/* SB */
	MOVW	R5, (0-(4*(32+6))+(4*5))(R20)	/* USER */
	MOVW	R6, (0-(4*(32+6))+(4*6))(R20)	/* MACH */
	SUB	$(4*(32+6)), R20, R1		/* R1 = base of Ureg on kernel stack */

trap1:
	/* save the special registers and the globals not restored by RESTORE */
	MOVW	Y, R20
	MOVW	R20, (4*(32+0))(R1)		/* Y */
	MOVW	TBR, R20
	MOVW	R20, (4*(32+1))(R1)		/* TBR */
	AND	$~0x1F, R19			/* force CWP=0 */
	MOVW	R19, (4*(32+2))(R1)		/* PSR */
	MOVW	R18, (4*(32+3))(R1)		/* nPC */
	MOVW	R17, (4*(32+4))(R1)		/* PC */
	MOVW	R0, (4*0)(R1)
	MOVW	R3, (4*3)(R1)
	MOVW	R4, (4*4)(R1)
	MOVW	R7, (4*7)(R1)
	RESTORE	R0, R0
	/* now our registers R8-R31 are same as before trap */
	/* save registers two at a time */
	MOVD	R8, (4*8)(R1)
	MOVD	R10, (4*10)(R1)
	MOVD	R12, (4*12)(R1)
	MOVD	R14, (4*14)(R1)
	MOVD	R16, (4*16)(R1)
	MOVD	R18, (4*18)(R1)
	MOVD	R20, (4*20)(R1)
	MOVD	R22, (4*22)(R1)
	MOVD	R24, (4*24)(R1)
	MOVD	R26, (4*26)(R1)
	MOVD	R28, (4*28)(R1)
	MOVD	R30, (4*30)(R1)
	/* SP and SB and u and m are already set; away we go */
	MOVW	R1, R7		/* pointer to Ureg */
	SUB	$8, R1
	MOVW	$SYSPSR, R8
	MOVW	R8, PSR
	NOOP
	JMPL	trap(SB)

	ADD	$8, R1
restore:
	/* shared return path; rfnote also enters here */
	MOVW	(4*(32+2))(R1), R8		/* PSR */
	MOVW	R8, PSR
	NOOP

	MOVD	(4*30)(R1), R30
	MOVD	(4*28)(R1), R28
	MOVD	(4*26)(R1), R26
	MOVD	(4*24)(R1), R24
	MOVD	(4*22)(R1), R22
	MOVD	(4*20)(R1), R20
	MOVD	(4*18)(R1), R18
	MOVD	(4*16)(R1), R16
	MOVD	(4*14)(R1), R14
	MOVD	(4*12)(R1), R12
	MOVD	(4*10)(R1), R10
	MOVD	(4*8)(R1), R8
	SAVE	R0, R0
	MOVD	(4*6)(R1), R6
	MOVD	(4*4)(R1), R4
	MOVD	(4*2)(R1), R2
	MOVW	(4*(32+0))(R1), R20		/* Y */
	MOVW	R20, Y
	MOVW	(4*(32+4))(R1), R17		/* PC */
	MOVW	(4*(32+3))(R1), R18		/* nPC */
	MOVW	(4*1)(R1), R1	/* restore R1=SP */
	RETT	R17, R18

usertrap:
	/*
	 * Interrupt or fault from user
	 */
	MOVW	R1, R8
	MOVW	R2, R9
	MOVW	$setSB(SB), R2			/* kernel SB */
	MOVW	$(USERADDR+BY2PG), R1		/* top of the user save page */
	MOVW	R8, (0-(4*(32+6))+(4*1))(R1)	/* save R1=SP */
	MOVW	R9, (0-(4*(32+6))+(4*2))(R1)	/* save R2=SB */
	MOVW	R5, (0-(4*(32+6))+(4*5))(R1)	/* save R5=USER */
	MOVW	R6, (0-(4*(32+6))+(4*6))(R1)	/* save R6=MACH */
	MOVW	$USERADDR, R(USER)
	MOVW	$mach0(SB), R(MACH)
	SUB	$(4*(32+6)), R1
	JMP	trap1
/*
 * System call entry from the vector code.  Builds a partial Ureg
 * (only the registers syscall needs), calls syscall(Ureg*), then
 * restores and returns to user mode with RETT.
 */
TEXT	syslink(SB), $-4

	/* R8 to R23 are free to play with */
	/* R17 contains PC, R18 contains nPC */
	/* R19 has PSR loaded from vector code */
	/* assume user did it; syscall checks */

	MOVW	R1, R8
	MOVW	R2, R9
	MOVW	$setSB(SB), R2			/* kernel SB */
	MOVW	$(USERADDR+BY2PG), R1		/* top of the user save page */
	MOVW	R8, (0-(4*(32+6))+4)(R1)	/* save R1=SP */
	SUB	$(4*(32+6)), R1
	MOVW	R9, (4*2)(R1)			/* save R2=SB */
	MOVW	R3, (4*3)(R1)			/* global register */
	MOVD	R4, (4*4)(R1)			/* global register, R5=USER */
	MOVD	R6, (4*6)(R1)			/* save R6=MACH, R7=syscall# */
	MOVW	$USERADDR, R(USER)
	MOVW	$mach0(SB), R(MACH)
	MOVW	TBR, R20
	MOVW	R20, (4*(32+1))(R1)		/* TBR */
	AND	$~0x1F, R19			/* force CWP=0 */
	MOVW	R19, (4*(32+2))(R1)		/* PSR */
	MOVW	R18, (4*(32+3))(R1)		/* nPC */
	MOVW	R17, (4*(32+4))(R1)		/* PC */
	RESTORE	R0, R0
	/* now our registers R8-R31 are same as before trap */
	MOVW	R15, (4*15)(R1)
	/* SP and SB and u and m are already set; away we go */
	MOVW	R1, R7			/* pointer to Ureg */
	SUB	$8, R1
	MOVW	$SYSPSR, R8
	MOVW	R8, PSR
	/* NOTE(review): traplink has a NOOP after the PSR write here; confirm this omission is intended */
	JMPL	syscall(SB)
	/* R7 contains return value from syscall */

	ADD	$8, R1
	MOVW	(4*(32+2))(R1), R8		/* PSR */
	MOVW	R8, PSR
	NOOP

	MOVW	(4*15)(R1), R15
	SAVE	R0, R0
	MOVW	(4*6)(R1), R6
	MOVD	(4*4)(R1), R4
	MOVD	(4*2)(R1), R2
	MOVW	(4*(32+4))(R1), R17		/* PC */
	MOVW	(4*(32+3))(R1), R18		/* nPC */
	MOVW	(4*1)(R1), R1	/* restore R1=SP */
	RETT	R17, R18
/*
 * Set the trap base register (arg in R7).
 */
TEXT	puttbr(SB), $0

	MOVW	R7, TBR
	NOOP			/* settle delayed write before TBR is used */
	RETURN
/*
 * Return the trap base register in R7.
 */
TEXT	gettbr(SB), $0

	MOVW	TBR, R7
	RETURN
/*
 * Return the current stack pointer (R1) in R7.
 */
TEXT	r1(SB), $0

	MOVW	R1, R7
	RETURN
/*
 * Return the window invalid mask register in R7.
 */
TEXT	getwim(SB), $0

	MOVW	WIM, R7
	RETURN
/*
 * Save SP and return PC into the label (arg in R7);
 * returns 0 (the gotolabel return path yields 1).
 */
TEXT	setlabel(SB), $0

	MOVW	R1, (R7)	/* label[0] = SP */
	MOVW	R15, 4(R7)	/* label[1] = return PC */
	MOVW	$0, R7
	RETURN
/*
 * Restore SP and return PC from the label (arg in R7), so the
 * corresponding setlabel() appears to return again, this time with 1.
 */
TEXT	gotolabel(SB), $0

	MOVW	(R7), R1	/* SP = label[0] */
	MOVW	4(R7), R15	/* return PC = label[1] */
	MOVW	$1, R7
	RETURN
/*
 * putcxsegm(context, segaddr, segval): set a segment-map entry in a
 * given context by calling a fixed routine in the ROM segment.
 * NOTE(review): 0xFFE80118 lies in ROMSEGM — presumably a PROM entry
 * point; confirm against the boot ROM documentation.
 */
TEXT	putcxsegm(SB), $0

	MOVW	R7, R8			/* context */
	MOVW	4(FP), R9		/* segment addr */
	MOVW	8(FP), R10		/* segment value */
	MOVW	$0xFFE80118, R7
	JMPL	(R7)
	RETURN
/*
 * Return the processor state register in R7.
 */
TEXT	getpsr(SB), $0

	MOVW	PSR, R7
	RETURN
/*
 * Set the MMU context register (ASI 2 at CONTEXT) to the byte in R7.
 */
TEXT	putcxreg(SB), $0

	MOVW	$CONTEXT, R8
	MOVB	R7, (R8, 2)	/* alternate-space store, ASI 2 */
	RETURN
/*
 * putb2(addr, val): store byte val at addr in ASI 2 (control space).
 */
TEXT	putb2(SB), $0

	MOVW	4(FP), R8
	MOVB	R8, (R7, 2)
	RETURN
/*
 * getb2(addr): load a byte from addr in ASI 2 (control space).
 */
TEXT	getb2(SB), $0

	MOVB	(R7, 2), R7
	RETURN
/*
 * getw2(addr): load a word from addr in ASI 2 (control space).
 */
TEXT	getw2(SB), $0

	MOVW	(R7, 2), R7
	RETURN
/*
 * putw2(addr, val): store word val at addr in ASI 2 (control space).
 */
TEXT	putw2(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 2)
	RETURN
/*
 * putw4(addr, val): store word val at addr in ASI 4.
 */
TEXT	putw4(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 4)
	RETURN
/*
 * getw4(addr): load a word from addr in ASI 4.
 */
TEXT	getw4(SB), $0

	MOVW	(R7, 4), R7
	RETURN
/*
 * putwC(addr, val): store word val at addr in ASI 0xC.
 */
TEXT	putwC(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xC)
	RETURN
/*
 * putwD(addr, val): store word val at addr in ASI 0xD.
 */
TEXT	putwD(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xD)
	RETURN
/*
 * putwD16(addr, val): store val to 16 consecutive ASI 0xD locations
 * spaced 16 bytes apart, starting at addr (unrolled on purpose).
 */
TEXT	putwD16(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	RETURN
/*
 * putwE(addr, val): store word val at addr in ASI 0xE.
 */
TEXT	putwE(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xE)
	RETURN
/*
 * putwE16(addr, val): store val to 16 consecutive ASI 0xE locations
 * spaced 16 bytes apart, starting at addr (unrolled on purpose).
 */
TEXT	putwE16(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	RETURN
/*
 * putsegm(addr, val): store a segment-map entry (word) at addr in ASI 3.
 */
TEXT	putsegm(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 3)
	RETURN
/*
 * in savefpregs and restfpregs, incoming R7 points to doubleword
 * below where F0 will go; doubleword align in and backfill FSR
 */
/*
 * Save FSR and F0-F31 to the buffer at R7, then disable the FPU
 * (clear PSREF) on the way out.
 */
TEXT	savefpregs(SB), $0

	ADD	$8, R7
	ANDN	$7, R7		/* now MOVD-aligned */
	MOVW	FSR, -4(R7)	/* backfill FSR below F0 */

	MOVD	F0, (0*4)(R7)
	MOVD	F2, (2*4)(R7)
	MOVD	F4, (4*4)(R7)
	MOVD	F6, (6*4)(R7)
	MOVD	F8, (8*4)(R7)
	MOVD	F10, (10*4)(R7)
	MOVD	F12, (12*4)(R7)
	MOVD	F14, (14*4)(R7)
	MOVD	F16, (16*4)(R7)
	MOVD	F18, (18*4)(R7)
	MOVD	F20, (20*4)(R7)
	MOVD	F22, (22*4)(R7)
	MOVD	F24, (24*4)(R7)
	MOVD	F26, (26*4)(R7)
	MOVD	F28, (28*4)(R7)
	MOVD	F30, (30*4)(R7)

	MOVW	PSR, R8
	ANDN	$PSREF, R8	/* FPU off */
	MOVW	R8, PSR
	RETURN
/*
 * Restore FSR and F0-F31 from the buffer at R7 (same layout as
 * savefpregs), then disable the FPU (clear PSREF) on the way out.
 */
TEXT	restfpregs(SB), $0

	MOVW	PSR, R8
	OR	$PSREF, R8	/* FPU on so the FP registers can be written */
	MOVW	R8, PSR

	ADD	$8, R7
	ANDN	$7, R7		/* now MOVD-aligned */
	OR	R0, R0		/* no-op: let the PSR write settle */

	MOVW	-4(R7), FSR

	MOVD	(0*4)(R7), F0
	MOVD	(2*4)(R7), F2
	MOVD	(4*4)(R7), F4
	MOVD	(6*4)(R7), F6
	MOVD	(8*4)(R7), F8
	MOVD	(10*4)(R7), F10
	MOVD	(12*4)(R7), F12
	MOVD	(14*4)(R7), F14
	MOVD	(16*4)(R7), F16
	MOVD	(18*4)(R7), F18
	MOVD	(20*4)(R7), F20
	MOVD	(22*4)(R7), F22
	MOVD	(24*4)(R7), F24
	MOVD	(26*4)(R7), F26
	MOVD	(28*4)(R7), F28
	MOVD	(30*4)(R7), F30

	ANDN	$PSREF, R8	/* FPU off again */
	MOVW	R8, PSR
	RETURN
/*
 * Drain the floating-point queue after an FP exception:
 * pop FQ entries until FSR says the queue is empty.
 */
TEXT	clearfpintr(SB), $0

	MOVW	$fpq+BY2WD(SB), R7
	ANDN	$0x7, R7		/* must be D aligned */
	MOVW	$fsr+0(SB), R9
clrq:
	MOVD	FQ, (R7)	/* pop one queue entry */
	MOVW	FSR, (R9)
	MOVW	(R9), R8
	AND	$(1<<13), R8		/* queue not empty? */
	BNE	clrq
	RETURN
/*
 * Return the FP status register in R7 (FSR can only be
 * stored to memory, so bounce it through fsr).
 */
TEXT	getfsr(SB), $0
	MOVW	$fsr+0(SB), R7
	MOVW	FSR, (R7)
	MOVW	(R7), R7
	RETURN
/* static storage used by this file */
GLOBL	mach0+0(SB), $MACHSIZE		/* Mach structure for cpu 0 */
GLOBL	fpq+0(SB), $(3*BY2WD)		/* FP queue drain buffer (clearfpintr) */
GLOBL	fsr+0(SB), $BY2WD		/* bounce word for FSR loads/stores */