#include "u.h"
2
#include "../port/lib.h"
3
#include "mem.h"
4
#include "dat.h"
5
#include "fns.h"
6
 
7
#include "arm.h"
8
 
9
#define L1X(va)		FEXT((va), 20, 12)
10
#define L2X(va)		FEXT((va), 12, 8)
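
/*
 * e.g. for va 0x80123456, L1X(va) = 0x801 indexes the containing 1MB
 * section in the 4096-entry L1 table and L2X(va) = 0x23 indexes the
 * containing 4K page in that section's 256-entry coarse L2 table
 * (assuming the usual FEXT(value, offset, width) bit-field extraction).
 */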

enum {
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
};

#define ISHOLE(pte)	((pte) == 0)

/* dump level 1 page table at virtual addr l1 */
void
mmudump(PTE *l1)
{
	int i, type, rngtype;
	uintptr pa, startva, startpa;
	uvlong va, endva;
	PTE pte;

//	pa -= MACHSIZE+1024;	/* put level 2 entries below level 1 */
//	l2 = KADDR(pa);

	print("\n");
	endva = startva = startpa = 0;
	rngtype = 0;
	/* dump first level of ptes */
	for (va = i = 0; i < 4096; i++) {
		pte = l1[i];
		pa = pte & ~(MB - 1);
		type = pte & (Fine|Section|Coarse);
		if (ISHOLE(pte)) {
			if (endva != 0) {	/* open range? close it */
				print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
					startva, endva-1, startpa, rngtype);
				endva = 0;
			}
		} else {
			if (endva == 0) {	/* no open range? start one */
				startva = va;
				startpa = pa;
				rngtype = type;
			}
			endva = va + MB;	/* continue the open range */
//			if (type == Coarse) {
//				// could dump the l2 table for this l1 entry
//			}
		}
		va += MB;
	}
	if (endva != 0)			/* close an open range */
		print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
			startva, endva-1, startpa, rngtype);
}

/* identity map the megabyte containing va, uncached */
static void
idmap(PTE *l1, ulong va)
{
	va &= ~(MB-1);
	l1[L1X(va)] = va | Dom0 | L1AP(Krw) | Section;
}

/* map `mbs' megabytes from virt to phys */
void
mmumap(uintptr virt, uintptr phys, int mbs)
{
	uint off;
	PTE *l1;

	phys &= ~(MB-1);
	virt &= ~(MB-1);
	l1 = KADDR(ttbget());
	for (off = 0; mbs-- > 0; off += MB)
		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) | Section;
	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();
}

/* identity map `mbs' megabytes from phys */
void
mmuidmap(uintptr phys, int mbs)
{
	mmumap(phys, phys, mbs);
}

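/*
 * set up the kernel's L1 table: map the first MB of VIRTIO to PHYSIO,
 * identity map the device register regions needed early, and map the
 * 4K high-vectors page through a coarse L2 table placed below the ttb.
 */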
void
mmuinit(void)
{
	uintptr pa;
	PTE *l1, *l2;

	pa = ttbget();
	l1 = KADDR(pa);

	/* redundant with l.s; only covers first MB of 17MB */
	l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;

	idmap(l1, PHYSETHER);		/* igep 9221 ethernet regs */
	idmap(l1, PHYSL4PROT);
	idmap(l1, PHYSL3);
	idmap(l1, PHYSSMS);
	idmap(l1, PHYSDRC);
	idmap(l1, PHYSGPMC);

	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+2*1024;
	l2 = KADDR(pa);
	memset(l2, 0, 1024);
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
	coherence();

	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();

	m->mmul1 = l1;
//	mmudump(l1);			/* DEBUG */
}

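/*
 * detach proc's L2 page-table pages: clear the matching L1 entries
 * (zeroing the L2 pages too if clear is set) and move the whole list
 * onto proc->mmul2cache for reuse.
 */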
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

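/* clear all user-space entries (L1lo..L1hi) from the current L1 table */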
static void
mmul1empty(void)
{
#ifdef notdef
/* there's a bug in here */
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = L1lo;
	}
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
		m->mmul1hi = L1hi;
	}
#else
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
#endif /* notdef */
}

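/*
 * switch the MMU to proc's address space: flush the caches, drop the
 * old user mappings, then reinstall proc's L2 tables in the L1 table.
 */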
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();

	//print("mmuswitch l1lo %d l1hi %d %d\n",
	//	m->mmul1lo, m->mmul1hi, proc->kp);
}

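/* flag the current process's tlb as stale and rebuild its mappings */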
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

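/*
 * free proc's page-table pages when its address space goes away:
 * return the cached L2 pages to the free list and clear the user
 * part of the L1 table.
 */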
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

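/*
 * install a user mapping of va -> pa (protection bits are encoded in pa),
 * allocating a coarse L2 table for the containing MB if there isn't one.
 */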
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	//print("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
	//print("mmul1 %#p l1 %#p *l1 %#ux x %d pid %d\n",
	//	m->mmul1, l1, *l1, x, up->pid);
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);
		//print("l1 %#p *l1 %#ux x %d pid %d\n", l1, *l1, x, up->pid);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));
	//print("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
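	/*
	 * e.g. a writable, cached data page gets Small|Cached|Buffered|L2AP(Urw);
	 * read-only text gets Small|Cached|Buffered|L2AP(Uro).
	 */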
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachedwbinv();
	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
	//print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
}

void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = PTR2UINT(v);
	assert(!(va & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Fine|Section|Coarse)) != Section)
		return nil;
	*pte &= ~(Cached|Buffered);
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);

	return v;
}

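/*
 * map the MB at va to the MB at pa as a single uncached kernel-rw Section;
 * returns va on success, 0 if va is already mapped.
 */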
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != Fault)
		return 0;
	*pte = pa|Dom0|L1AP(Krw)|Section;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);

	return va;
}

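/*
 * undo an mmukmap() of pa at va; returns va on success, 0 if the
 * existing entry doesn't match.
 */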
uintptr
mmukunmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != (pa|Dom0|L1AP(Krw)|Section))
		return 0;
	*pte = Fault;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);

	return va;
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa >= PHYSDRAM && pa < PHYSDRAM+memsize)
		return PHYSDRAM+memsize - pa;
	return 0;
}

/* from 386 */
void*
vmap(uintptr pa, usize size)
{
	uintptr pae, va;
	usize o, osize;

	/*
	 * XXX - replace with new vm stuff.
	 * Crock after crock - the first 4MB is mapped with 2MB pages
	 * so catch that and return good values because the current mmukmap
	 * will fail.
	 */
	if(pa+size < 4*MiB)
		return UINT2PTR(kseg0|pa);

	osize = size;
	o = pa & (BY2PG-1);
	pa -= o;
	size += o;
	size = ROUNDUP(size, BY2PG);

	va = kseg0|pa;
	pae = mmukmap(va, pa, size);
	if(pae == 0 || pae-size != pa)
		panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
			pa+o, osize, getcallerpc(&pa), pae);

	return UINT2PTR(va+o);
}

/* from 386 */
void
vunmap(void* v, usize size)
{
	/*
	 * XXX - replace with new vm stuff.
	 * Can't do this until do real vmap for all space that
	 * might be used, e.g. stuff below 1MB which is currently
	 * mapped automagically at boot but that isn't used (or
	 * at least shouldn't be used) by the kernel.
	upafree(PADDR(v), size);
	 */
	USED(v, size);
}

/*
 * Notes.
 * Everything is in domain 0;
 * domain 0 access bits in the DAC register are set
 * to Client, which means access is controlled by the
 * permission values set in the PTE.
 *
 * L1 access control for the kernel is set to 1 (RW,
 * no user mode access);
 * L2 access control for the kernel is set to 1 (ditto)
 * for all 4 AP sets;
 * L1 user mode access is never set;
 * L2 access control for user mode is set to either
 * 2 (RO) or 3 (RW) depending on whether text or data,
 * for all 4 AP sets.
 * (To get kernel RO set AP to 0 and S bit in control
 * register c1).
 * Coarse descriptors are used in the L1 table; each points to a
 * second-level (coarse) page table of 256 entries, so each L2 table
 * consumes 1024 bytes.
 * Small (4KiB) page descriptors are used in the L2 tables.
 *
 * 4KiB. That's the size of 1) a page, 2) the
 * size allocated for an L2 page-table page (note only 1KiB
 * is needed per L2 page - to be dealt with later) and
 * 3) the size of the area in L1 needed to hold the PTEs
 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).
 */