/*
 * Size memory and create the kernel page-tables on the fly while doing so.
 * Called from main(), this code should only be run by the bootstrap processor.
 *
 * MemMin is what the bootstrap code in l.s has already mapped;
 * MemMax is the limit of physical memory to scan.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"

#define MEMDEBUG	0

enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	MemReserved	= 3,
	NMemType	= 4,

	KB		= 1024,

	MemMin		= 8*MB,
	MemMax		= (3*1024+768)*MB,
};

typedef struct Map Map;
struct Map {
	ulong	size;
	ulong	addr;
};

typedef struct RMap RMap;
struct RMap {
	char*	name;
	Map*	map;
	Map*	mapend;

	Lock;
};
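
/*
 * A resource map is an array of {size, addr} runs kept sorted by
 * address; a zero-size entry terminates the array, and mapend names
 * the final slot, which is reserved for that terminator.
 */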

/*
 * Memory allocation tracking.
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};

void
mapprint(RMap *rmap)
{
	Map *mp;

	print("%s\n", rmap->name);
	for(mp = rmap->map; mp->size; mp++)
		print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
}

void
memdebug(void)
{
	ulong maxpa, maxpa1, maxpa2;

	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapram);
	mapprint(&rmapumb);
	mapprint(&rmapumbrw);
	mapprint(&rmapupa);
}
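
/*
 * mapfree: return the range [addr, addr+size) to the map, merging
 * it with adjacent free runs where possible. If the map array is
 * full, the range is dropped with a diagnostic.
 */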
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	if(size == 0)
		return;

	lock(rmap);
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);
	}
	unlock(rmap);
}
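
/*
 * For example (addresses are illustrative): after
 *	mapfree(&rmapram, 0x100000, 0x1000);
 *	mapfree(&rmapram, 0x101000, 0x1000);
 * the two adjacent runs coalesce into a single map entry covering
 * 0x100000-0x102000, rather than occupying two slots.
 */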

ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry starts beyond the
			 *   address, the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range, then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range, then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);
		}

		unlock(rmap);
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
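
/*
 * Illustrative calls: rampage() below takes a page-aligned page
 * from anywhere in RAM with
 *	mapalloc(&rmapram, 0, BY2PG, BY2PG);
 * while umbexclude() pins down an exact range with
 *	mapalloc(&rmapumb, addr, size, 0);
 * A return value of 0 means the request could not be satisfied.
 */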

/*
 * Allocate from the ram map directly to make page tables.
 * Called by mmuwalk during e820scan.
 */
void*
rampage(void)
{
	ulong m;

	m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
	if(m == 0)
		return nil;
	return KADDR(m);
}

static void
umbexclude(void)
{
	int size;
	ulong addr;
	char *op, *p, *rptr;

	if((p = getconf("umbexclude")) == nil)
		return;

	while(p && *p != '\0' && *p != '\n'){
		op = p;
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);
			break;
		}
		p = rptr+1;

		size = strtoul(p, &rptr, 0) - addr + 1;
		if(size <= 0){
			print("umbexclude: bad range <%s>\n", op);
			break;
		}
		if(rptr != nil && *rptr == ',')
			*rptr++ = '\0';
		p = rptr;

		mapalloc(&rmapumb, addr, size, 0);
	}
}
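
/*
 * The expected plan9.ini syntax is inclusive ranges separated by
 * commas, e.g. (values hypothetical):
 *	umbexclude=0xD0000-0xD3FFF,0xD8000-0xD9FFF
 * Each excluded range is allocated out of rmapumb above, so it can
 * never be handed out by umbmalloc().
 */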

static void
umbscan(void)
{
	uchar o[2], *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROMs must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Check for the ROM signature, skip if valid.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			p += p[2]*512;
			continue;
		}

		/*
		 * Is it writeable? If yes, then stick it in
		 * the UMB device memory map. A floating bus will
		 * return 0xff, so add that to the map of the
		 * UMB space available for allocation.
		 * If it is neither of those, ignore it.
		 */
		o[0] = p[0];
		p[0] = 0xCC;
		o[1] = p[2*KB-1];
		p[2*KB-1] = 0xCC;
		if(p[0] == 0xCC && p[2*KB-1] == 0xCC){
			p[0] = o[0];
			p[2*KB-1] = o[1];
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		}
		else if(p[0] == 0xFF && p[1] == 0xFF)
			mapfree(&rmapumb, PADDR(p), 2*KB);
		p += 2*KB;
	}

	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}

	umbexclude();
}
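
/*
 * For example (illustrative): a 16KB adapter ROM at 0xD0000 carries
 * the header 0x55 0xAA 0x20 (0x20*512 = 16KB) and is stepped over in
 * one go, while an unclaimed 2KB window reading back 0xFF lands in
 * rmapumb for later allocation.
 */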

static void*
sigscan(uchar* addr, int len, char* signature)
{
	int sl;
	uchar *e, *p;

	e = addr+len;
	sl = strlen(signature);
	for(p = addr; p+sl < e; p += 16)
		if(memcmp(p, signature, sl) == 0)
			return p;
	return nil;
}

void*
sigsearch(char* signature)
{
	uintptr p;
	uchar *bda;
	void *r;

	/*
	 * Search for the data structure:
	 * 1) within the first KiB of the Extended BIOS Data Area (EBDA), or
	 * 2) within the last KiB of system base memory if the EBDA segment
	 *    is undefined, or
	 * 3) within the BIOS ROM address space between 0xf0000 and 0xfffff
	 *    (but will actually check 0xe0000 to 0xfffff).
	 */
	bda = BIOSSEG(0x40);
	if(memcmp(KADDR(0xfffd9), "EISA", 4) == 0){
		if((p = (bda[0x0f]<<8)|bda[0x0e]) != 0){
			if((r = sigscan(BIOSSEG(p), 1024, signature)) != nil)
				return r;
		}
	}

	if((p = ((bda[0x14]<<8)|bda[0x13])*1024) != 0){
		if((r = sigscan(KADDR(p-1024), 1024, signature)) != nil)
			return r;
	}
	/* hack for virtualbox: look in KiB below 0xa0000 */
	if((r = sigscan(KADDR(0xa0000-1024), 1024, signature)) != nil)
		return r;

	return sigscan(BIOSSEG(0xe000), 0x20000, signature);
}
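
/*
 * Callers hand in BIOS table signatures; for instance, an MP
 * configuration search would use sigsearch("_MP_") and an ACPI
 * RSDP search sigsearch("RSD PTR ") (examples, not a complete list).
 */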

static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0END);
	bda = (uchar*)KADDR(0x400);
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n);			/* keep us honest */

	x = PADDR(PGROUND((ulong)end));
	pa = MemMin;
	if(x > pa)
		panic("kernel too big");
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);		/* keep us honest */
}

static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMin;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMax;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */

	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking that the page can
	 * be written and read back correctly. The page tables are
	 * created here on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;
	map = 0;
	x = 0x12345678;
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * we can only use KADDR for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */
	vbase = MemMin;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary; it will be returned to the
		 * pool later if it isn't needed.  Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));
		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}
		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory.  Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;
	}
	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped.  Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}

/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
	SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',
	Ememory = 1,
	Ereserved = 2,
	Carry = 1,
};

typedef struct Emap Emap;
struct Emap
{
	uvlong base;
	uvlong len;
	ulong type;
};
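
/*
 * Emap mirrors the 20-byte descriptor returned by the BIOS call:
 * a 64-bit base, a 64-bit length and a 32-bit type, which is why
 * e820scan() asks for exactly 20 bytes in u.cx.
 */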
static Emap emap[16];
int nemap;

static char *etypes[] =
{
	"type=0",
	"memory",
	"reserved",
	"acpi reclaim",
	"acpi nvs",
};

static int
emapcmp(const void *va, const void *vb)
{
	Emap *a, *b;

	a = (Emap*)va;
	b = (Emap*)vb;
	if(a->base < b->base)
		return -1;
	if(a->base > b->base)
		return 1;
	if(a->len < b->len)
		return -1;
	if(a->len > b->len)
		return 1;
	return a->type - b->type;
}

static void
map(ulong base, ulong len, int type)
{
	ulong e, n;
	ulong *table, flags, maxkpa;

	/*
	 * Split any call crossing MemMin to make below simpler.
	 */
	if(base < MemMin && len > MemMin-base){
		n = MemMin - base;
		map(base, n, type);
		map(MemMin, len-n, type);
	}

	/*
	 * Let lowraminit and umbscan hash out the low MemMin.
	 */
	if(base < MemMin)
		return;

	/*
	 * Any non-memory below 16*MB is used as upper mem blocks.
	 */
	if(type == MemUPA && base < 16*MB && base+len > 16*MB){
		map(base, 16*MB-base, MemUMB);
		map(16*MB, len-(16*MB-base), MemUPA);
		return;
	}

	/*
	 * Memory below CPU0END is reserved for the kernel
	 * and already mapped.
	 */
	if(base < PADDR(CPU0END)){
		n = PADDR(CPU0END) - base;
		if(len <= n)
			return;
		map(PADDR(CPU0END), len-n, type);
		return;
	}

	/*
	 * Memory between KTZERO and end is the kernel itself
	 * and is already mapped.
	 */
	if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
		map(base, PADDR(KTZERO)-base, type);
		return;
	}
	if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
		n = PADDR(PGROUND((ulong)end)) - base;
		if(len <= n)
			return;
		map(PADDR(PGROUND((ulong)end)), len-n, type);
		return;
	}

	/*
	 * Now we have a simple case.
	 */
	// print("map %.8lux %.8lux %d\n", base, base+len, type);
	switch(type){
	case MemRAM:
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;
		break;
	case MemUMB:
		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;
		break;
	case MemUPA:
		mapfree(&rmapupa, base, len);
		flags = 0;
		break;
	default:
	case MemReserved:
		flags = 0;
		break;
	}

	/*
	 * bottom MemMin is already mapped - just twiddle flags.
	 * (not currently used - see above)
	 */
	if(base < MemMin){
		table = KADDR(PPN(m->pdb[PDX(base)]));
		e = base+len;
		base = PPN(base);
		for(; base<e; base+=BY2PG)
			table[PTX(base)] |= flags;
		return;
	}

	/*
	 * Only map from KZERO to 2^32.
	 */
	if(flags){
		maxkpa = -KZERO;
		if(base >= maxkpa)
			return;
		if(len > maxkpa-base)
			len = maxkpa - base;
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	}
}

static int
e820scan(void)
{
	int i;
	Ureg u;
	ulong cont, base, len;
	uvlong last;
	Emap *e;

	if(getconf("*norealmode") || getconf("*noe820scan"))
		return -1;

	cont = 0;
	for(i=0; i<nelem(emap); i++){
		memset(&u, 0, sizeof u);
		u.ax = 0xE820;
		u.bx = cont;
		u.cx = 20;
		u.dx = SMAP;
		u.es = (PADDR(RMBUF)>>4)&0xF000;
		u.di = PADDR(RMBUF)&0xFFFF;
		u.trap = 0x15;
		realmode(&u);
		cont = u.bx;
		if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
			break;
		e = &emap[nemap++];
		*e = *(Emap*)RMBUF;
		if(u.bx == 0)
			break;
	}
	if(nemap == 0)
		return -1;

	qsort(emap, nemap, sizeof emap[0], emapcmp);

	if(getconf("*noe820print") == nil){
		for(i=0; i<nemap; i++){
			e = &emap[i];
			print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
			if(e->type < nelem(etypes))
				print("%s\n", etypes[e->type]);
			else
				print("type=%lud\n", e->type);
		}
	}

	last = 0;
	for(i=0; i<nemap; i++){
		e = &emap[i];
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1LL<<32))
			break;
		base = e->base;
		if(base+e->len > (1LL<<32))
			len = -base;
		else
			len = e->len;
		/*
		 * If the map skips addresses, mark them available.
		 */
		if(last < e->base)
			map(last, e->base-last, MemUPA);
		last = base+len;
		if(e->type == Ememory)
			map(base, len, MemRAM);
		else
			map(base, len, MemReserved);
	}
	if(last < (1LL<<32))
		map(last, (u32int)-last, MemUPA);
	return 0;
}

void
meminit(void)
{
	int i;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong maxmem, lost;
	char *p;

	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);
	else
		maxmem = 0;

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 *   VGA memory is writethrough;
	 *   BIOS ROMs/UMBs are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();
	if(e820scan() < 0)
		ramscan(maxmem);

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
	}

	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}

/*
 * Allocate memory from the upper memory blocks.
 */
ulong
umbmalloc(ulong addr, int size, int align)
{
	ulong a;

	if(a = mapalloc(&rmapumb, addr, size, align))
		return (ulong)KADDR(a);

	return 0;
}
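
/*
 * Illustrative use (values hypothetical): a driver wanting 8KB of
 * UMB space anywhere in the region, paragraph-aligned:
 *	va = umbmalloc(0, 8*KB, 16);
 * The result is a virtual (KZERO-based) address; umbfree() takes
 * the same address back and converts it with PADDR.
 */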

void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}

ulong
umbrwmalloc(ulong addr, int size, int align)
{
	ulong a;
	uchar o[2], *p;

	if(a = mapalloc(&rmapumbrw, addr, size, align))
		return (ulong)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface was initialised, so try again.
	 */
	if((a = umbmalloc(addr, size, align)) == 0)
		return 0;
	p = (uchar*)a;
	o[0] = p[0];
	p[0] = 0xCC;
	o[1] = p[size-1];
	p[size-1] = 0xCC;
	if(p[0] == 0xCC && p[size-1] == 0xCC){
		p[0] = o[0];
		p[size-1] = o[1];
		return a;
	}
	umbfree(a, size);

	return 0;
}

void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}

/*
 * Give out otherwise-unused physical address space
 * for use in configuring devices.  Note that unlike upamalloc
 * before it, upaalloc does not map the physical address
 * into virtual memory.  Call vmap to do that.
 */
ulong
upaalloc(int size, int align)
{
	ulong a;

	a = mapalloc(&rmapupa, 0, size, align);
	if(a == 0){
		print("out of physical address space allocating %d\n", size);
		mapprint(&rmapupa);
	}
	return a;
}
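
/*
 * Typical sequence (sizes hypothetical): allocate address space for
 * a device, then map it before use:
 *	pa = upaalloc(1*MB, 1*MB);
 *	if(pa != 0)
 *		va = vmap(pa, 1*MB);
 */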

void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}

void
upareserve(ulong pa, int size)
{
	ulong a;

	a = mapalloc(&rmapupa, pa, size, 0);
	if(a != pa){
		/*
		 * This can happen when we're using the E820
		 * map, which might have already reserved some
		 * of the regions claimed by the pci devices.
		 */
	//	print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
		if(a != 0)
			mapfree(&rmapupa, a, size);
	}
}

void
memorysummary(void)
{
	memdebug();
}
945