/*
 * This allocator takes blocks from a coarser allocator (p->alloc) and
 * uses them as arenas.
 * 
 * An arena is split into a sequence of blocks of variable size.  The
 * blocks begin with a Bhdr that denotes the length (including the Bhdr)
 * of the block.  An arena begins with an Arena header block (Arena,
 * ARENA_MAGIC) and ends with a Bhdr block with magic ARENATAIL_MAGIC and
 * size 0.  Intermediate blocks are either allocated or free.  At the end
 * of each intermediate block is a Btail, which contains information
 * about where the block starts.  This is useful for walking backwards.
 * 
 * Free blocks (Free*) have a magic value of FREE_MAGIC in their Bhdr
 * headers.  They are kept in a binary tree (p->freeroot) traversable by
 * walking ->left and ->right.  Each node of the binary tree is a pointer
 * to a circular doubly-linked list (next, prev) of blocks of identical
 * size.  Blocks are added to this ``tree of lists'' by pooladd(), and
 * removed by pooldel().
 * 
 * When freed, adjacent blocks are coalesced to create larger blocks when
 * possible.
 * 
 * Allocated blocks (Alloc*) have one of two magic values: ALLOC_MAGIC or
 * UNALLOC_MAGIC.  When blocks are released from the pool, they have
 * magic value UNALLOC_MAGIC.  Once the block has been trimmed by trim()
 * and the amount of user-requested data has been recorded in the
 * datasize field of the tail, the magic value is changed to ALLOC_MAGIC.
 * All blocks returned to callers should be of type ALLOC_MAGIC, as
 * should all blocks passed to us by callers.  The amount of data the
 * user asked for can be found by subtracting the short in tail->datasize
 * from header->size.  Further, the (at most four) bytes between the end
 * of the user-requested data and the Btail structure are marked with a
 * magic value, which is checked to detect user overflow.
 * 
 * The arenas returned by p->alloc are kept in a doubly-linked list
 * (p->arenalist) running through the arena headers, sorted by descending
 * base address (aup, down).  When a new arena is allocated, we attempt
 * to merge it with its two neighbors via p->merge.
 */
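
/*
 * A sketch of the layout described above:
 *
 *	arena:	[Arena hdr (ARENA_MAGIC)][block][block]...[Bhdr (ARENATAIL_MAGIC, size 0)]
 *	block:	[Bhdr (magic, size)][user data...][<=4 bytes datamagic][Btail]
 *
 * Bhdr->size counts everything from the Bhdr through the Btail, so
 * B2NB (header plus size; defined below) lands on the next block's
 * Bhdr and B2T (end of block minus sizeof(Btail)) lands on the tail.
 */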

#include <u.h>
#include <libc.h>
#include <pool.h>

typedef struct Alloc	Alloc;
typedef struct Arena	Arena;
typedef struct Bhdr	Bhdr;
typedef struct Btail	Btail;
typedef struct Free	Free;

struct Bhdr {
	ulong	magic;
	ulong	size;
};
enum {
	NOT_MAGIC = 0xdeadfa11,
	DEAD_MAGIC = 0xdeaddead,
};
#define B2NB(b) ((Bhdr*)((uchar*)(b)+(b)->size))

#define SHORT(x) (((x)[0] << 8) | (x)[1])
#define PSHORT(p, x) \
	(((uchar*)(p))[0] = ((x)>>8)&0xFF, \
	((uchar*)(p))[1] = (x)&0xFF)
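
/*
 * SHORT and PSHORT read and write a 16-bit value as two big-endian
 * bytes, independent of host byte order: PSHORT(t->datasize, 0x1234)
 * stores 0x12 then 0x34, and SHORT(t->datasize) yields 0x1234 again.
 */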

enum {
	TAIL_MAGIC0 = 0xBE,
	TAIL_MAGIC1 = 0xEF
};
struct Btail {
	uchar	magic0;
	uchar	datasize[2];
	uchar	magic1;
	ulong	size;	/* same as Bhdr->size */
};
#define B2T(b)	((Btail*)((uchar*)(b)+(b)->size-sizeof(Btail)))
#define B2PT(b) ((Btail*)((uchar*)(b)-sizeof(Btail)))
#define T2HDR(t) ((Bhdr*)((uchar*)(t)+sizeof(Btail)-(t)->size))
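
/*
 * The tail sits flush against the end of its block, so
 * (uchar*)(t)+sizeof(Btail) is one past the block's last byte;
 * subtracting t->size from that lands back on the block's Bhdr,
 * which is all T2HDR does.  Likewise B2PT, applied to a block
 * header, yields the previous block's tail, which is what makes
 * walking backwards possible.
 */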
struct Free {
	Bhdr;
	Free*	left;
	Free*	right;
	Free*	next;
	Free*	prev;
};
enum {
	FREE_MAGIC = 0xBA5EBA11,
};

/*
 * the point of the notused fields is to make 8c differentiate
 * between Bhdr and Allocblk, and between Kempt and Unkempt.
 */
struct Alloc {
	Bhdr;
};
enum {
	ALLOC_MAGIC = 0x0A110C09,
	UNALLOC_MAGIC = 0xCAB00D1E+1,
};

struct Arena {
	Bhdr;
	Arena*	aup;
	Arena*	down;
	ulong	asize;
	ulong	pad;	/* to a multiple of 8 bytes */
};
enum {
	ARENA_MAGIC = 0xC0A1E5CE+1,
	ARENATAIL_MAGIC = 0xEC5E1A0C+1,
};
#define A2TB(a)	((Bhdr*)((uchar*)(a)+(a)->asize-sizeof(Bhdr)))
#define A2B(a)	B2NB(a)

enum {
	ALIGN_MAGIC = 0xA1F1D1C1,
};

enum {
	MINBLOCKSIZE = sizeof(Free)+sizeof(Btail)
};

static uchar datamagic[] = { 0xFE, 0xF1, 0xF0, 0xFA };

#define	Poison	(void*)0xCafeBabe

#define _B2D(a)	((void*)((uchar*)a+sizeof(Bhdr)))
#define _D2B(v)	((Alloc*)((uchar*)v-sizeof(Bhdr)))

// static void*	_B2D(void*);
// static void*	_D2B(void*);
static void*	B2D(Pool*, Alloc*);
static Alloc*	D2B(Pool*, void*);
static Arena*	arenamerge(Pool*, Arena*, Arena*);
static void		blockcheck(Pool*, Bhdr*);
static Alloc*	blockmerge(Pool*, Bhdr*, Bhdr*);
static Alloc*	blocksetdsize(Pool*, Alloc*, ulong);
static Bhdr*	blocksetsize(Bhdr*, ulong);
static ulong	bsize2asize(Pool*, ulong);
static ulong	dsize2bsize(Pool*, ulong);
static ulong	getdsize(Alloc*);
static Alloc*	trim(Pool*, Alloc*, ulong);
static Free*	listadd(Free*, Free*);
static void		logstack(Pool*);
static Free**	ltreewalk(Free**, ulong);
static void		memmark(void*, int, ulong);
static Free*	pooladd(Pool*, Alloc*);
static void*	poolallocl(Pool*, ulong);
static void		poolcheckl(Pool*);
static void		poolcheckarena(Pool*, Arena*);
static int		poolcompactl(Pool*);
static Alloc*	pooldel(Pool*, Free*);
static void		pooldumpl(Pool*);
static void		pooldumparena(Pool*, Arena*);
static void		poolfreel(Pool*, void*);
static void		poolnewarena(Pool*, ulong);
static void*	poolreallocl(Pool*, void*, ulong);
static Free*	treedelete(Free*, Free*);
static Free*	treeinsert(Free*, Free*);
static Free*	treelookup(Free*, ulong);
static Free*	treelookupgt(Free*, ulong);

/*
 * Debugging
 * 
 * Antagonism causes blocks to always be filled with garbage if their
 * contents are undefined.  This tickles both programs and the library.
 * It's a linear time hit but not so noticeable during nondegenerate use.
 * It would be worth leaving in except that it negates the benefits of the
 * kernel's demand-paging.  The tail magic and end-of-data magic
 * provide most of the user-visible benefit that antagonism does anyway.
 *
 * Paranoia causes the library to recheck the entire pool on each lock
 * or unlock.  A failed check on unlock means we tripped over ourselves,
 * while a failed check on lock tends to implicate the user.  Paranoia has
 * the potential to slow things down a fair amount for pools with large
 * numbers of allocated blocks.  It completely negates all benefits won
 * by the binary tree.  Turning on paranoia in the kernel makes it painfully
 * slow.
 *
 * Verbosity induces the dumping of the pool via p->print at each lock operation.
 * By default, only one line is logged for each alloc, free, and realloc.
 */

/* the if(!x);else avoids ``dangling else'' problems */
#define antagonism	if(!(p->flags & POOL_ANTAGONISM)){}else
#define paranoia	if(!(p->flags & POOL_PARANOIA)){}else
#define verbosity	if(!(p->flags & POOL_VERBOSITY)){}else
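
/*
 * The empty then-branch makes these safe to use as prefixes:
 *	antagonism { stmt; }
 * expands to
 *	if(!(p->flags & POOL_ANTAGONISM)){}else { stmt; }
 * so an else in the surrounding code cannot attach to the wrong if.
 */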

#define DPRINT	if(!(p->flags & POOL_DEBUGGING)){}else p->print
#define LOG		if(!(p->flags & POOL_LOGGING)){}else p->print

/*
 * Tree walking
 */

static void
checklist(Free *t)
{
	Free *q;

	for(q=t->next; q!=t; q=q->next){
		assert(q->size == t->size);
		assert(q->next==nil || q->next->prev==q);
		assert(q->prev==nil || q->prev->next==q);
	//	assert(q->left==nil);
	//	assert(q->right==nil);
		assert(q->magic==FREE_MAGIC);
	}
}

static void
checktree(Free *t, int a, int b)
{
	assert(t->magic==FREE_MAGIC);
	assert(a < t->size && t->size < b);
	assert(t->next==nil || t->next->prev==t);
	assert(t->prev==nil || t->prev->next==t);
	checklist(t);
	if(t->left)
		checktree(t->left, a, t->size);
	if(t->right)
		checktree(t->right, t->size, b);
}

/* ltreewalk: return address of pointer to node of size == size */
static Free**
ltreewalk(Free **t, ulong size)
{
	assert(t != nil /* ltreewalk */);

	for(;;) {
		if(*t == nil)
			return t;

		assert((*t)->magic == FREE_MAGIC);

		if(size == (*t)->size)
			return t;
		if(size < (*t)->size)
			t = &(*t)->left;
		else
			t = &(*t)->right;
	}
}

/* treelookup: find node in tree with size == size */
static Free*
treelookup(Free *t, ulong size)
{
	return *ltreewalk(&t, size);
}

/* treeinsert: insert node into tree */
static Free*
treeinsert(Free *tree, Free *node)
{
	Free **loc, *repl;

	assert(node != nil /* treeinsert */);

	loc = ltreewalk(&tree, node->size);
	if(*loc == nil) {
		node->left = nil;
		node->right = nil;
	} else {	/* replace existing node */
		repl = *loc;
		node->left = repl->left;
		node->right = repl->right;
	}
	*loc = node;
	return tree;
}

/* treedelete: remove node from tree */
static Free*
treedelete(Free *tree, Free *node)
{
	Free **loc, **lsucc, *succ;

	assert(node != nil /* treedelete */);

	loc = ltreewalk(&tree, node->size);
	assert(*loc == node);

	if(node->left == nil)
		*loc = node->right;
	else if(node->right == nil)
		*loc = node->left;
	else {
		/* have two children, use inorder successor as replacement */
		for(lsucc = &node->right; (*lsucc)->left; lsucc = &(*lsucc)->left)
			;
		succ = *lsucc;
		*lsucc = succ->right;
		succ->left = node->left;
		succ->right = node->right;
		*loc = succ;
	}

	node->left = node->right = Poison;
	return tree;
}

/* treelookupgt: find smallest node in tree with size >= size */
static Free*
treelookupgt(Free *t, ulong size)
{
	Free *lastgood;	/* last node we saw that was big enough */

	lastgood = nil;
	for(;;) {
		if(t == nil)
			return lastgood;
		if(size == t->size)
			return t;
		if(size < t->size) {
			lastgood = t;
			t = t->left;
		} else
			t = t->right;
	}
}

/* 
 * List maintenance
 */

/* listadd: add a node to a doubly linked list */
static Free*
listadd(Free *list, Free *node)
{
	if(list == nil) {
		node->next = node;
		node->prev = node;
		return node;
	}

	node->prev = list->prev;
	node->next = list;

	node->prev->next = node;
	node->next->prev = node;

	return list;
}

/* listdelete: remove node from a doubly linked list */
static Free*
listdelete(Pool *p, Free *list, Free *node)
{
	if(node->next == node) {	/* singular list */
		node->prev = node->next = Poison;
		return nil;
	}
	if(node->next == nil)
		p->panic(p, "pool->next");
	if(node->prev == nil)
		p->panic(p, "pool->prev");
	node->next->prev = node->prev;
	node->prev->next = node->next;

	if(list == node)
		list = node->next;

	node->prev = node->next = Poison;
	return list;
}

/*
 * Pool maintenance
 */

/* pooladd: add anode to the free pool */
static Free*
pooladd(Pool *p, Alloc *anode)
{
	Free *lst, *olst;
	Free *node;
	Free **parent;

	antagonism {
		memmark(_B2D(anode), 0xF7, anode->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node = (Free*)anode;
	node->magic = FREE_MAGIC;
	parent = ltreewalk(&p->freeroot, node->size);
	olst = *parent;
	lst = listadd(olst, node);
	if(olst != lst)	/* need to update tree */
		*parent = treeinsert(*parent, lst);
	p->curfree += node->size;
	return node;
}

/* pooldel: remove node from the free pool */
static Alloc*
pooldel(Pool *p, Free *node)
{
	Free *lst, *olst;
	Free **parent;

	parent = ltreewalk(&p->freeroot, node->size);
	olst = *parent;
	assert(olst != nil /* pooldel */);

	lst = listdelete(p, olst, node);
	if(lst == nil)
		*parent = treedelete(*parent, olst);
	else if(lst != olst)
		*parent = treeinsert(*parent, lst);

	node->left = node->right = Poison;
	p->curfree -= node->size;

	antagonism {
		memmark(_B2D(node), 0xF9, node->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node->magic = UNALLOC_MAGIC;
	return (Alloc*)node;
}
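
/*
 * Note that pooladd and pooldel only touch the tree when the list head
 * for a size changes: listadd returns an existing head unchanged, so
 * treeinsert runs only for a brand-new size, and listdelete returns a
 * new head (or nil) only when the removed node was the head (or the
 * list's sole member), triggering treeinsert (or treedelete).
 */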

/*
 * Block maintenance
 */
/* block allocation */
static ulong
dsize2bsize(Pool *p, ulong sz)
{
	sz += sizeof(Bhdr)+sizeof(Btail);
	if(sz < p->minblock)
		sz = p->minblock;
	if(sz < MINBLOCKSIZE)
		sz = MINBLOCKSIZE;
	sz = (sz+p->quantum-1)&~(p->quantum-1);
	return sz;
}
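
/*
 * For example, with 32-bit ulongs (sizeof(Bhdr)==8, sizeof(Btail)==8)
 * and p->quantum==32, a request for 100 user bytes becomes 116 bytes
 * of block and rounds up to 128.
 */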

static ulong
bsize2asize(Pool *p, ulong sz)
{
	sz += sizeof(Arena)+sizeof(Btail);
	if(sz < p->minarena)
		sz = p->minarena;
	sz = (sz+p->quantum)&~(p->quantum-1);
	return sz;
}

/* blockmerge: merge a and b, known to be adjacent */
/* both are removed from pool if necessary. */
static Alloc*
blockmerge(Pool *pool, Bhdr *a, Bhdr *b)
{
	Btail *t;

	assert(B2NB(a) == b);

	if(a->magic == FREE_MAGIC)
		pooldel(pool, (Free*)a);
	if(b->magic == FREE_MAGIC)
		pooldel(pool, (Free*)b);

	t = B2T(a);
	t->size = (ulong)Poison;
	t->magic0 = NOT_MAGIC;
	t->magic1 = NOT_MAGIC;
	PSHORT(t->datasize, NOT_MAGIC);

	a->size += b->size;
	t = B2T(a);
	t->size = a->size;
	PSHORT(t->datasize, 0xFFFF);

	b->size = NOT_MAGIC;
	b->magic = NOT_MAGIC;

	a->magic = UNALLOC_MAGIC;
	return (Alloc*)a;
}

/* blocksetsize: set the total size of a block, fixing tail pointers */
static Bhdr*
blocksetsize(Bhdr *b, ulong bsize)
{
	Btail *t;

	assert(b->magic != FREE_MAGIC /* blocksetsize */);

	b->size = bsize;
	t = B2T(b);
	t->size = b->size;
	t->magic0 = TAIL_MAGIC0;
	t->magic1 = TAIL_MAGIC1;
	return b;
}

/* getdsize: return the requested data size for an allocated block */
static ulong
getdsize(Alloc *b)
{
	Btail *t;
	t = B2T(b);
	return b->size - SHORT(t->datasize);
}

/* blocksetdsize: set the user data size of a block */
static Alloc*
blocksetdsize(Pool *p, Alloc *b, ulong dsize)
{
	Btail *t;
	uchar *q, *eq;

	assert(b->size >= dsize2bsize(p, dsize));
	assert(b->size - dsize < 0x10000);

	t = B2T(b);
	PSHORT(t->datasize, b->size - dsize);

	q = (uchar*)_B2D(b)+dsize;
	eq = (uchar*)t;
	if(eq > q+4)
		eq = q+4;
	for(; q<eq; q++)
		*q = datamagic[((ulong)(uintptr)q)%nelem(datamagic)];

	return b;
}
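
/*
 * The bytes written above are the overflow canary: the (at most four)
 * bytes between the user data and the tail are set to datamagic values
 * indexed by address, and blockcheck later verifies them to catch
 * writes just past the end of the requested data.
 */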

/* trim: trim a block down to what is needed to hold dsize bytes of user data */
static Alloc*
trim(Pool *p, Alloc *b, ulong dsize)
{
	ulong extra, bsize;
	Alloc *frag;

	bsize = dsize2bsize(p, dsize);
	extra = b->size - bsize;
	if(b->size - dsize >= 0x10000 ||
	  (extra >= bsize>>2 && extra >= MINBLOCKSIZE && extra >= p->minblock)) {
		blocksetsize(b, bsize);
		frag = (Alloc*) B2NB(b);

		antagonism {
			memmark(frag, 0xF1, extra);
		}

		frag->magic = UNALLOC_MAGIC;
		blocksetsize(frag, extra);
		pooladd(p, frag);
	}

	b->magic = ALLOC_MAGIC;
	blocksetdsize(p, b, dsize);
	return b;
}
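
/*
 * For example, carving a 100-byte request out of a 4096-byte free
 * block (quantum 32 as above, assuming a small p->minblock) leaves
 * extra = 3968, comfortably above bsize>>2 and MINBLOCKSIZE, so the
 * remainder becomes a new free block instead of trailing slack.
 */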

static Alloc*
freefromfront(Pool *p, Alloc *b, ulong skip)
{
	Alloc *bb;

	skip = skip&~(p->quantum-1);
	if(skip >= 0x1000 || (skip >= b->size>>2 && skip >= MINBLOCKSIZE && skip >= p->minblock)){
		bb = (Alloc*)((uchar*)b+skip);
		blocksetsize(bb, b->size-skip);
		bb->magic = UNALLOC_MAGIC;
		blocksetsize(b, skip);
		b->magic = UNALLOC_MAGIC;
		pooladd(p, b);
		return bb;
	}
	return b;
}

/*
 * Arena maintenance
 */

/* arenasetsize: set arena size, updating tail */
static void
arenasetsize(Arena *a, ulong asize)
{
	Bhdr *atail;

	a->asize = asize;
	atail = A2TB(a);
	atail->magic = ARENATAIL_MAGIC;
	atail->size = 0;
}

/* poolnewarena: allocate new arena */
static void
poolnewarena(Pool *p, ulong asize)
{
	Arena *a;
	Arena *ap, *lastap;
	Alloc *b;

	LOG(p, "newarena %lud\n", asize);
	if(p->cursize+asize > p->maxsize) {
		if(poolcompactl(p) == 0){
			LOG(p, "pool too big: %lud+%lud > %lud\n",
				p->cursize, asize, p->maxsize);
			werrstr("memory pool too large");
		}
		return;
	}

	if((a = p->alloc(asize)) == nil) {
		/* assume errstr set by p->alloc */
		return;
	}

	p->cursize += asize;

	/* arena hdr */
	a->magic = ARENA_MAGIC;
	blocksetsize(a, sizeof(Arena));
	arenasetsize(a, asize);
	blockcheck(p, a);

	/* create one large block in arena */
	b = (Alloc*)A2B(a);
	b->magic = UNALLOC_MAGIC;
	blocksetsize(b, (uchar*)A2TB(a)-(uchar*)b);
	blockcheck(p, b);
	pooladd(p, b);
	blockcheck(p, b);

	/* sort arena into descending sorted arena list */
	for(lastap=nil, ap=p->arenalist; ap > a; lastap=ap, ap=ap->down)
		;

	if(a->down = ap)	/* assign = */
		a->down->aup = a;

	if(a->aup = lastap)	/* assign = */
		a->aup->down = a;
	else
		p->arenalist = a;

	/* merge with surrounding arenas if possible */
	/* must do a with up before down with a (think about it) */
	if(a->aup)
		arenamerge(p, a, a->aup);
	if(a->down)
		arenamerge(p, a->down, a);
}

/* blockgrow: grow a block to encompass space past its end, possibly by */
/* trimming it into two different blocks. */
static void
blockgrow(Pool *p, Bhdr *b, ulong nsize)
{
	if(b->magic == FREE_MAGIC) {
		Alloc *a;
		Bhdr *bnxt;
		a = pooldel(p, (Free*)b);
		blockcheck(p, a);
		blocksetsize(a, nsize);
		blockcheck(p, a);
		bnxt = B2NB(a);
		if(bnxt->magic == FREE_MAGIC)
			a = blockmerge(p, a, bnxt);
		blockcheck(p, a);
		pooladd(p, a);
	} else {
		Alloc *a;
		ulong dsize;

		a = (Alloc*)b;
		dsize = getdsize(a);
		blocksetsize(a, nsize);
		trim(p, a, dsize);
	}
}

/* arenamerge: attempt to coalesce two arenas that might be adjacent */
static Arena*
arenamerge(Pool *p, Arena *bot, Arena *top)
{
	Bhdr *bbot, *btop;
	Btail *t;

	blockcheck(p, bot);
	blockcheck(p, top);
	assert(bot->aup == top && top > bot);

	if(p->merge == nil || p->merge(bot, top) == 0)
		return nil;

	/* remove top from list */
	if(bot->aup = top->aup)	/* assign = */
		bot->aup->down = bot;
	else
		p->arenalist = bot;

	/* save ptrs to last block in bot, first block in top */
	t = B2PT(A2TB(bot));
	bbot = T2HDR(t);
	btop = A2B(top);
	blockcheck(p, bbot);
	blockcheck(p, btop);

	/* grow bottom arena to encompass top */
	arenasetsize(bot, top->asize + ((uchar*)top - (uchar*)bot));

	/* grow bottom block to encompass space between arenas */
	blockgrow(p, bbot, (uchar*)btop-(uchar*)bbot);
	blockcheck(p, bbot);
	return bot;
}

/* dumpblock: print block's vital stats */
static void
dumpblock(Pool *p, Bhdr *b)
{
	ulong *dp;
	ulong dsize;
	uchar *cp;

	dp = (ulong*)b;
	p->print(p, "pool %s block %p\nhdr %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux\n",
		p->name, b, dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]);

	dp = (ulong*)B2T(b);
	p->print(p, "tail %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux | %.8lux %.8lux\n",
		dp[-6], dp[-5], dp[-4], dp[-3], dp[-2], dp[-1], dp[0], dp[1]);

	if(b->magic == ALLOC_MAGIC){
		dsize = getdsize((Alloc*)b);
		if(dsize >= b->size)	/* user data size corrupt */
			return;

		cp = (uchar*)_B2D(b)+dsize;
		p->print(p, "user data ");
		p->print(p, "%.2ux %.2ux %.2ux %.2ux  %.2ux %.2ux %.2ux %.2ux",
			cp[-8], cp[-7], cp[-6], cp[-5], cp[-4], cp[-3], cp[-2], cp[-1]);
		p->print(p, " | %.2ux %.2ux %.2ux %.2ux  %.2ux %.2ux %.2ux %.2ux\n",
			cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
	}
}

static void
printblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
}

static void
panicblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
	p->panic(p, "pool panic");
}

/* blockcheck: ensure a block is consistent with our expectations */
/* should only be called when holding pool lock */
static void
blockcheck(Pool *p, Bhdr *b)
{
	Alloc *a;
	Btail *t;
	int i, n;
	uchar *q, *bq, *eq;
	ulong dsize;

	switch(b->magic) {
	default:
		panicblock(p, b, "bad magic");
	case FREE_MAGIC:
	case UNALLOC_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		break;
	case DEAD_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		n = getdsize((Alloc*)b);
		q = _B2D(b);
		q += 8;
		for(i=8; i<n; i++)
			if(*q++ != 0xDA)
				panicblock(p, b, "dangling pointer write");
		break;
	case ARENA_MAGIC:
		b = A2TB((Arena*)b);
		if(b->magic != ARENATAIL_MAGIC)
			panicblock(p, b, "bad arena size");
		/* fall through */
	case ARENATAIL_MAGIC:
		if(b->size != 0)
			panicblock(p, b, "bad arena tail size");
		break;
	case ALLOC_MAGIC:
		a = (Alloc*)b;
		t = B2T(b);
		dsize = getdsize(a);
		bq = (uchar*)_B2D(a)+dsize;
		eq = (uchar*)t;

		if(t->magic0 != TAIL_MAGIC0){
			/* if someone wrote exactly one byte over and it was a NUL, we sometimes only complain. */
			if((p->flags & POOL_TOLERANCE) && bq == eq && t->magic0 == 0)
				printblock(p, b, "mem user overflow (magic0)");
			else
				panicblock(p, b, "corrupt tail magic0");
		}

		if(t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic1");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");

		if(dsize2bsize(p, dsize) > a->size)
			panicblock(p, b, "too much block data");

		if(eq > bq+4)
			eq = bq+4;
		for(q=bq; q<eq; q++){
			if(*q != datamagic[((uintptr)q)%nelem(datamagic)]){
				if(q == bq && *q == 0 && (p->flags & POOL_TOLERANCE)){
					printblock(p, b, "mem user overflow");
					continue;
				}
				panicblock(p, b, "mem user overflow");
			}
		}
		break;
	}
}

/*
 * compact an arena by shifting all the free blocks to the end.
 * assumes pool lock is held.
 */
enum {
	FLOATING_MAGIC = 0xCBCBCBCB,	/* temporarily neither allocated nor in the free tree */
};

static int
arenacompact(Pool *p, Arena *a)
{
	Bhdr *b, *wb, *eb, *nxt;
	int compacted;

	if(p->move == nil)
		p->panic(p, "don't call me when pool->move is nil\n");

	poolcheckarena(p, a);
	eb = A2TB(a);
	compacted = 0;
	for(b=wb=A2B(a); b && b < eb; b=nxt) {
		nxt = B2NB(b);
		switch(b->magic) {
		case FREE_MAGIC:
			pooldel(p, (Free*)b);
			b->magic = FLOATING_MAGIC;
			break;
		case ALLOC_MAGIC:
			if(wb != b) {
				memmove(wb, b, b->size);
				p->move(_B2D(b), _B2D(wb));
				compacted = 1;
			}
			wb = B2NB(wb);
			break;
		}
	}

	/*
	 * the only free data is now at the end of the arena, pointed
	 * at by wb.  all we need to do is set its size and get out.
	 */
	if(wb < eb) {
		wb->magic = UNALLOC_MAGIC;
		blocksetsize(wb, (uchar*)eb-(uchar*)wb);
		pooladd(p, (Alloc*)wb);
	}

	return compacted;
}

/*
 * compact a pool by compacting each individual arena.
 * 'twould be nice to shift blocks from one arena to the
 * next but it's a pain to code.
 */
static int
poolcompactl(Pool *pool)
{
	Arena *a;
	int compacted;

	if(pool->move == nil || pool->lastcompact == pool->nfree)
		return 0;

	pool->lastcompact = pool->nfree;
	compacted = 0;
	for(a=pool->arenalist; a; a=a->down)
		compacted |= arenacompact(pool, a);
	return compacted;
}

/*
static int
poolcompactl(Pool*)
{
	return 0;
}
*/

/*
 * Actual allocators
 */

/*
static void*
_B2D(void *a)
{
	return (uchar*)a+sizeof(Bhdr);
}
*/

static void*
B2D(Pool *p, Alloc *a)
{
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "B2D called on unworthy block");
	return _B2D(a);
}

/*
static void*
_D2B(void *v)
{
	Alloc *a;
	a = (Alloc*)((uchar*)v-sizeof(Bhdr));
	return a;
}
*/

static Alloc*
D2B(Pool *p, void *v)
{
	Alloc *a;
	ulong *u;

	if((uintptr)v&(sizeof(ulong)-1))
		v = (char*)v - ((uintptr)v&(sizeof(ulong)-1));
	u = v;
	while(u[-1] == ALIGN_MAGIC)
		u--;
	a = _D2B(u);
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "D2B called on non-block %p (double-free?)", v);
	return a;
}
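
/*
 * Pointers produced by poolallocalignl (below) may point past padding
 * words that were filled with ALIGN_MAGIC; D2B backs up over any such
 * words to recover the true block header before checking the magic.
 */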

/* poolallocl: attempt to allocate block to hold dsize user bytes; assumes lock held */
static void*
poolallocl(Pool *p, ulong dsize)
{
	ulong bsize;
	Free *fb;
	Alloc *ab;

	if(dsize >= 0x80000000UL){	/* for sanity, overflow */
		werrstr("invalid allocation size");
		return nil;
	}

	bsize = dsize2bsize(p, dsize);

	fb = treelookupgt(p->freeroot, bsize);
	if(fb == nil) {
		poolnewarena(p, bsize2asize(p, bsize));
		if((fb = treelookupgt(p->freeroot, bsize)) == nil) {
			/* assume poolnewarena failed and set %r */
			return nil;
		}
	}

	ab = trim(p, pooldel(p, fb), dsize);
	p->curalloc += ab->size;
	antagonism {
		memset(B2D(p, ab), 0xDF, dsize);
	}
	return B2D(p, ab);
}

/* poolreallocl: attempt to grow v to ndsize bytes; assumes lock held */
static void*
poolreallocl(Pool *p, void *v, ulong ndsize)
{
	Alloc *a;
	Bhdr *left, *right, *newb;
	Btail *t;
	ulong nbsize;
	ulong odsize;
	ulong obsize;
	void *nv;

	if(v == nil)	/* for ANSI */
		return poolallocl(p, ndsize);
	if(ndsize == 0) {
		poolfreel(p, v);
		return nil;
	}
	a = D2B(p, v);
	blockcheck(p, a);
	odsize = getdsize(a);
	obsize = a->size;

	/* can reuse the same block? */
	nbsize = dsize2bsize(p, ndsize);
	if(nbsize <= a->size) {
	Returnblock:
		if(v != _B2D(a))
			memmove(_B2D(a), v, odsize);
		a = trim(p, a, ndsize);
		p->curalloc -= obsize;
		p->curalloc += a->size;
		v = B2D(p, a);
		return v;
	}

	/* can merge with surrounding blocks? */
	right = B2NB(a);
	if(right->magic == FREE_MAGIC && a->size+right->size >= nbsize) {
		a = blockmerge(p, a, right);
		goto Returnblock;
	}

	t = B2PT(a);
	left = T2HDR(t);
	if(left->magic == FREE_MAGIC && left->size+a->size >= nbsize) {
		a = blockmerge(p, left, a);
		goto Returnblock;
	}

	if(left->magic == FREE_MAGIC && right->magic == FREE_MAGIC
	&& left->size+a->size+right->size >= nbsize) {
		a = blockmerge(p, blockmerge(p, left, a), right);
		goto Returnblock;
	}

	if((nv = poolallocl(p, ndsize)) == nil)
		return nil;

	/* maybe the new block is next to us; if so, merge */
	left = T2HDR(B2PT(a));
	right = B2NB(a);
	newb = D2B(p, nv);
	if(left == newb || right == newb) {
		if(left == newb || left->magic == FREE_MAGIC)
			a = blockmerge(p, left, a);
		if(right == newb || right->magic == FREE_MAGIC)
			a = blockmerge(p, a, right);
		assert(a->size >= nbsize);
		goto Returnblock;
	}

	/* enough cleverness */
	memmove(nv, v, odsize);
	antagonism {
		memset((char*)nv+odsize, 0xDE, ndsize-odsize);
	}
	poolfreel(p, v);
	return nv;
}

static void*
alignptr(void *v, ulong align, long offset)
{
	char *c;
	ulong off;

	c = v;
	if(align){
		off = (uintptr)c%align;
		if(off != offset){
			c += offset - off;
			if(off > offset)
				c += align;
		}
	}
	return c;
}
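
/*
 * For example, alignptr(0x1005, 0x10, 4): off = 5, so c += 4-5 = -1,
 * and since off > offset, c += 0x10, giving 0x1014, which is 4 mod 16.
 */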

/* poolallocalignl: allocate as described below; assumes pool locked */
static void*
poolallocalignl(Pool *p, ulong dsize, ulong align, long offset, ulong span)
{
	ulong asize;
	void *v;
	char *c;
	ulong *u;
	int skip;
	Alloc *b;

	/*
	 * allocate block
	 * 	dsize bytes
	 *	addr == offset (modulo align)
	 *	does not cross span-byte block boundary
	 *
	 * to satisfy alignment, just allocate an extra
	 * align bytes and then shift appropriately.
	 * 
	 * to satisfy span, try once and see if we're
	 * lucky.  the second time, allocate 2x asize
	 * so that we definitely get one not crossing
	 * the boundary.
	 */
	if(align){
		if(offset < 0)
			offset = align - ((-offset)%align);
		else
			offset %= align;
	}
	asize = dsize+align;
	v = poolallocl(p, asize);
	if(v == nil)
		return nil;
	if(span && (uintptr)v/span != ((uintptr)v+asize)/span){
		/* try again */
		poolfreel(p, v);
		v = poolallocl(p, 2*asize);
		if(v == nil)
			return nil;
	}

	/*
	 * figure out what pointer we want to return
	 */
	c = alignptr(v, align, offset);
	if(span && (uintptr)c/span != (uintptr)(c+dsize-1)/span){
		c += span - (uintptr)c%span;
		c = alignptr(c, align, offset);
		if((uintptr)c/span != (uintptr)(c+dsize-1)/span){
			poolfreel(p, v);
			werrstr("cannot satisfy dsize %lud span %lud with align %lud+%ld", dsize, span, align, offset);
			return nil;
		}
	}
	skip = c - (char*)v;

	/*
	 * free up the skip bytes before that pointer
	 * or mark it as unavailable.
	 */
	b = _D2B(v);
	b = freefromfront(p, b, skip);
	v = _B2D(b);
	skip = c - (char*)v;
	if(c > (char*)v){
		u = v;
		while(c >= (char*)u+sizeof(ulong))
			*u++ = ALIGN_MAGIC;
	}
	trim(p, b, skip+dsize);
	assert(D2B(p, c) == b);
	antagonism {
		memset(c, 0xDD, dsize);
	}
	return c;
}

/* poolfreel: free block obtained from poolalloc; assumes lock held */
static void
poolfreel(Pool *p, void *v)
{
	Alloc *ab;
	Bhdr *back, *fwd;

	if(v == nil)	/* for ANSI */
		return;

	ab = D2B(p, v);
	blockcheck(p, ab);

	if(p->flags&POOL_NOREUSE){
		int n;

		ab->magic = DEAD_MAGIC;
		n = getdsize(ab)-8;
		if(n > 0)
			memset((uchar*)v+8, 0xDA, n);
		return;
	}

	p->nfree++;
	p->curalloc -= ab->size;
	back = T2HDR(B2PT(ab));
	if(back->magic == FREE_MAGIC)
		ab = blockmerge(p, back, ab);

	fwd = B2NB(ab);
	if(fwd->magic == FREE_MAGIC)
		ab = blockmerge(p, ab, fwd);

	pooladd(p, ab);
}

void*
poolalloc(Pool *p, ulong n)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocl(p, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolalloc %p %lud = %p\n", p, n, v);
	p->unlock(p);
	return v;
}

void*
poolallocalign(Pool *p, ulong n, ulong align, long offset, ulong span)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocalignl(p, n, align, offset, span);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolalignspanalloc %p %lud %lud %lud %ld = %p\n", p, n, align, span, offset, v);
	p->unlock(p);
	return v;
}

int
poolcompact(Pool *p)
{
	int rv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	rv = poolcompactl(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	LOG(p, "poolcompact %p\n", p);
	p->unlock(p);
	return rv;
}

void*
poolrealloc(Pool *p, void *v, ulong n)
{
	void *nv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	nv = poolreallocl(p, v, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolrealloc %p %p %ld = %p\n", p, v, n, nv);
	p->unlock(p);
	return nv;
}

void
poolfree(Pool *p, void *v)
{
	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	poolfreel(p, v);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolfree %p %p\n", p, v);
	p->unlock(p);
}

/*
 * Return the real size of a block, and let the user use it.
 */
ulong
poolmsize(Pool *p, void *v)
{
	Alloc *b;
	ulong dsize;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(v == nil)	/* consistency with other braindead ANSI-ness */
		dsize = 0;
	else {
		b = D2B(p, v);
		dsize = (b->size&~(p->quantum-1)) - sizeof(Bhdr) - sizeof(Btail);
		assert(dsize >= getdsize(b));
		blocksetdsize(p, b, dsize);
	}
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolmsize %p %p = %ld\n", p, v, dsize);
	p->unlock(p);
	return dsize;
}

/*
 * Debugging 
 */

static void
poolcheckarena(Pool *p, Arena *a)
{
	Bhdr *b;
	Bhdr *atail;

	atail = A2TB(a);
	for(b=a; b->magic != ARENATAIL_MAGIC && b<atail; b=B2NB(b))
		blockcheck(p, b);
	blockcheck(p, b);
	if(b != atail)
		p->panic(p, "found wrong tail");
}

static void
poolcheckl(Pool *p)
{
	Arena *a;

	for(a=p->arenalist; a; a=a->down)
		poolcheckarena(p, a);
	if(p->freeroot)
		checktree(p->freeroot, 0, 1<<30);
}

void
poolcheck(Pool *p)
{
	p->lock(p);
	poolcheckl(p);
	p->unlock(p);
}

void
poolblockcheck(Pool *p, void *v)
{
	if(v == nil)
		return;

	p->lock(p);
	blockcheck(p, D2B(p, v));
	p->unlock(p);
}

static void
pooldumpl(Pool *p)
{
	Arena *a;

	p->print(p, "pool %p %s\n", p, p->name);
	for(a=p->arenalist; a; a=a->down)
		pooldumparena(p, a);
}

void
pooldump(Pool *p)
{
	p->lock(p);
	pooldumpl(p);
	p->unlock(p);
}

static void
pooldumparena(Pool *p, Arena *a)
{
	Bhdr *b;

	for(b=a; b->magic != ARENATAIL_MAGIC; b=B2NB(b))
		p->print(p, "(%p %.8lux %lud)", b, b->magic, b->size);
	p->print(p, "\n");
}

/*
 * mark the memory in such a way that we know who marked it
 * (via the signature) and we know where the marking started.
 */
static void
memmark(void *v, int sig, ulong size)
{
	uchar *p, *ep;
	ulong *lp, *elp;
	lp = v;
	elp = lp+size/4;
	while(lp < elp)
		*lp++ = (sig<<24) ^ ((uintptr)lp-(uintptr)v);
	p = (uchar*)lp;
	ep = (uchar*)v+size;
	while(p<ep)
		*p++ = sig;
}
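
/*
 * Hypothetical usage sketch, not part of this file: a client supplies
 * the coarse allocator and callbacks in a Pool (field names as used
 * throughout this file; the my* functions are placeholders) and then
 * draws blocks from it:
 *
 *	static Pool mypool = {
 *		.name=		"example",
 *		.maxsize=	4*1024*1024,
 *		.minarena=	128*1024,
 *		.quantum=	32,
 *		.alloc=		myalloc,
 *		.merge=		mymerge,
 *		.lock=		mylock,
 *		.unlock=	myunlock,
 *		.print=		myprint,
 *		.panic=		mypanic,
 *	};
 *
 *	void *v = poolalloc(&mypool, 100);
 *	v = poolrealloc(&mypool, v, 200);
 *	poolfree(&mypool, v);
 */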