/*
 * Size memory and create the kernel page-tables on the fly while doing so.
 * Called from main(), this code should only be run by the bootstrap processor.
 *
 * MemMin is what the bootstrap code in l.s has already mapped;
 * MemMax is the limit of physical memory to scan.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"

#define MEMDEBUG	0

enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	MemReserved	= 3,
	NMemType	= 4,

	KB		= 1024,
};

typedef struct Map Map;
struct Map {
	ulong	size;
	ulong	addr;
};

typedef struct RMap RMap;
struct RMap {
	char*	name;
	Map*	map;
	Map*	mapend;

	Lock;
};

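/*
 * Each RMap is a fixed-size array of Map entries kept sorted by
 * address and terminated by an entry with size 0.  mapalloc and
 * mapfree keep adjacent free ranges coalesced; the embedded Lock
 * serialises all updates.
 */
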
/*
 * Memory allocation tracking.
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical addresses",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};

static void map(ulong base, ulong len, int type);

void
mapprint(RMap *rmap)
{
	Map *mp;

	print("%s\n", rmap->name);
	for(mp = rmap->map; mp->size; mp++)
		print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
}

void
memdebug(void)
{
	ulong maxpa, maxpa1, maxpa2;

	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapram);
	mapprint(&rmapumb);
	mapprint(&rmapumbrw);
	mapprint(&rmapupa);
}

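/*
 * Return the range [addr, addr+size) to rmap, coalescing it with
 * adjacent free ranges.  If the map is full the range is dropped
 * with a complaint rather than corrupting the map.
 */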
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	if(size <= 0)
		return;

	lock(rmap);
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing %#luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);
	}
	unlock(rmap);
}

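/*
 * Allocate size bytes from rmap, aligned to align if align > 0.
 * If addr is non-zero the range is taken at that address;
 * otherwise the first fit is used.  Returns 0 on failure,
 * so address zero itself can never be allocated.
 */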
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry starts beyond the
			 *   address then the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);
		}

		unlock(rmap);
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}

/*
 * Allocate from the ram map directly to make page tables.
 * Called by mmuwalk during e820scan.
 */
void*
rampage(void)
{
	ulong m;

	m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
	if(m == 0)
		return nil;
	return KADDR(m);
}

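/*
 * Remove the ranges named in the umbexclude= configuration variable
 * from the UMB map so they are never handed out.  The value is a
 * comma-separated list of inclusive lo-hi address ranges.
 */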
static void
umbexclude(void)
{
	int size;
	ulong addr;
	char *op, *p, *rptr;

	if((p = getconf("umbexclude")) == nil)
		return;

	while(p && *p != '\0' && *p != '\n'){
		op = p;
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);
			break;
		}
		p = rptr+1;

		size = strtoul(p, &rptr, 0) - addr + 1;
		if(size <= 0){
			print("umbexclude: bad range <%s>\n", op);
			break;
		}
		if(rptr != nil && *rptr == ',')
			*rptr++ = '\0';
		p = rptr;

		mapalloc(&rmapumb, addr, size, 0);
	}
}

static void
umbscan(void)
{
	uchar o[2], *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROMs must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Check for the ROM signature, skip if valid.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			p += p[2]*512;
			continue;
		}

		/*
		 * Is it writeable? If yes, then stick it in
		 * the UMB device memory map. A floating bus will
		 * return 0xff, so add that to the map of the
		 * UMB space available for allocation.
		 * If it is neither of those, ignore it.
		 */
		o[0] = p[0];
		p[0] = 0xCC;
		o[1] = p[2*KB-1];
		p[2*KB-1] = 0xCC;
		if(p[0] == 0xCC && p[2*KB-1] == 0xCC){
			p[0] = o[0];
			p[2*KB-1] = o[1];
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		}
		else if(p[0] == 0xFF && p[1] == 0xFF)
			mapfree(&rmapumb, PADDR(p), 2*KB);
		p += 2*KB;
	}

	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}

	umbexclude();
}

enum {
	Pteflags = (1<<12) - 1,
};

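/*
 * Debugging aid: print the non-zero entries of the page directory
 * at pdb; flag bits in the pointer are masked off first.
 */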
void
dumppdb(ulong *pdb)
{
	ulong *pp;

	pdb = (ulong *)((uintptr)pdb & ~Pteflags);
	iprint("pdb at phys %#8.8p:\n", PADDR(pdb));
	for (pp = pdb; pp < pdb + 1024; pp++)
		if (*pp)
			iprint("pdb[%3ld]: %#8.8lux\n", pp - pdb, *pp);
}

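/*
 * Debugging aid: print the non-zero entries among the first `first'
 * entries of the page table named by page-directory entry pdb[sub].
 */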
void
dumppte(ulong *pdb, int sub, int first)
{
	ulong *pp, *pte;

	pte = KADDR(pdb[sub]);
	pte = (ulong *)((uintptr)pte & ~Pteflags);
	if (PADDR(pte) == 0) {
		iprint("pdb[%d] unmapped\n", sub);
		return;
	}
	iprint("pdb[%d] pte at phys %#8.8p:\n", sub, PADDR(pte));
	for (pp = pte; pp < pte + first; pp++)
		if (*pp)
			iprint("pte[%3ld]: %#8.8lux\n", pp - pte, *pp);
	iprint("...\n");
}

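/*
 * Return the physical address that virtual address va maps to under
 * the current page directory.  Assumes the relevant page-directory
 * entry points to a present 4KB page table; large pages and missing
 * entries are not checked for.
 */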
uintptr
mapping(uintptr va)
{
	ulong *pte;

	pte = KADDR(m->pdb[PDX(va)] & ~Pteflags);
	return pte[PTX(va)] & ~Pteflags;
}

/*
 * adjust the maps and make the mmu mappings match the maps
 */
static void
lowraminit(void)
{
	/*
	 * low memory is in use by bootstrap kernels and ROMs.
	 * MemReserved is untouchable, so use MemRAM.
	 * address zero is special to mapalloc, and thus to map, so avoid it.
	 * we can thus load the new kernel directly at 1MB and up.
	 */
//	map(BY2PG, MB - BY2PG, MemRAM)	/* executing this map call is fatal */
	mapalloc(&rmapram, BY2PG, Mallocbase - BY2PG, 0);

	/*
	 * declare all RAM above Mallocbase to be free.
	 */
	map(Mallocbase, MemMax - Mallocbase, MemRAM);

	/* declare rest of physical address space above RAM to be available */
	map(MemMax, KZERO-MemMax, MemUPA);

	/* force the new mappings to take effect */
	mmuflushtlb(PADDR(m->pdb));
}

/*
 * add region at physical base of len bytes to map for `type', and
 * set up page tables to map virtual KZERO|base to physical base.
 */
static void
map(ulong base, ulong len, int type)
{
	ulong n, flags, maxkpa;

//	iprint("map %.8lux %.8lux %d (", base, base+len, type);
	/*
	 * Split any call crossing MemMin to make the code below simpler.
	 */
	if(base < MemMin && len > MemMin-base){
		n = MemMin - base;
		map(base, n, type);
		map(MemMin, len-n, type);
		return;
	}

	switch(type){
	case MemRAM:
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;
		break;
	case MemUMB:
		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;
		break;
	case MemUPA:
		mapfree(&rmapupa, base, len);
		flags = 0;
		break;
	default:
	case MemReserved:
		flags = 0;
		break;
	}

	/*
	 * Only map from KZERO to 2^32.
	 */
	if(flags){
		maxkpa = -KZERO;
		if(base >= maxkpa)
			return;
		if(len > maxkpa-base)
			len = maxkpa - base;
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	}
}

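/*
 * Called from main() on the bootstrap processor: set up the double
 * map of low memory in the page directory, set cache attributes for
 * the 640KB-1MB region, scan the upper memory blocks, declare the
 * remaining RAM and physical address space via lowraminit, and
 * record the banks of allocatable RAM in conf.mem.
 */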
void
meminit(void)
{
	int i, kzsub;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong lost, physpte;

	/* no need to size memory, we don't need much. */
	pte = m->pdb + BY2PG/BY2WD;		/* see l*.s */

	/* populate pdb with double-mapping of low memory */
	kzsub = ((uintptr)KZERO >> (2*PGSHIFT - 4)) / sizeof(ulong);
	physpte = (uintptr)PADDR(pte);
	for (i = 0; i < LOWPTEPAGES; i++)
		m->pdb[kzsub + i] = m->pdb[i] =
			PTEVALID | PTEKERNEL | PTEWRITE | (physpte + i * BY2PG);

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 *   VGA memory is writethrough;
	 *   BIOS ROMs/UMBs are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
		if (i == 0 && cm->npage == 0)
			panic("meminit: no memory in conf.mem");
	}
	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}

/*
 * Allocate memory from the upper memory blocks.
 */
ulong
umbmalloc(ulong addr, int size, int align)
{
	ulong a;

	if(a = mapalloc(&rmapumb, addr, size, align))
		return (ulong)KADDR(a);

	return 0;
}

void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}

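/*
 * Allocate read/write memory from the upper memory blocks.  If none
 * is known to be writeable, take space from the general UMB map and
 * probe it with a write test; give it back if the test fails.
 */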
ulong
umbrwmalloc(ulong addr, int size, int align)
{
	ulong a;
	uchar o[2], *p;

	if(a = mapalloc(&rmapumbrw, addr, size, align))
		return (ulong)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface was initialised, so try again.
	 */
	if((a = umbmalloc(addr, size, align)) == 0)
		return 0;
	p = (uchar*)a;
	o[0] = p[0];
	p[0] = 0xCC;
	o[1] = p[size-1];
	p[size-1] = 0xCC;
	if(p[0] == 0xCC && p[size-1] == 0xCC){
		p[0] = o[0];
		p[size-1] = o[1];
		return a;
	}
	umbfree(a, size);

	return 0;
}

void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}

/*
 * Give out otherwise-unused physical address space
 * for use in configuring devices.  Note that unlike upamalloc
 * before it, upaalloc does not map the physical address
 * into virtual memory.  Call vmap to do that.
 */
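/*
 * A typical caller (sketch only, not taken from a specific driver):
 *
 *	if((pa = upaalloc(size, BY2PG)) != 0)
 *		va = vmap(pa, size);
 */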
ulong
upaalloc(int size, int align)
{
	ulong a;

	a = mapalloc(&rmapupa, 0, size, align);
	if(a == 0){
		print("out of physical address space allocating %d\n", size);
		mapprint(&rmapupa);
	}
	return a;
}

void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}

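/*
 * Reserve the range pa..pa+size in the unbacked physical address map
 * so that upaalloc will never hand it out.
 */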
void
upareserve(ulong pa, int size)
{
	ulong a;

	a = mapalloc(&rmapupa, pa, size, 0);
	if(a != pa){
		/*
		 * This can happen when we're using the E820
		 * map, which might have already reserved some
		 * of the regions claimed by the pci devices.
		 */
	//	print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
		if(a != 0)
			mapfree(&rmapupa, a, size);
	}
}

void
memorysummary(void)
{
	memdebug();
}