/* 9660srv iobuf.c — clustered i/o buffer cache for ISO9660 */

#include <u.h>
#include <libc.h>
#include <auth.h>
#include <fcall.h>
#include "dat.h"
#include "fns.h"

/*
 * We used to use 100 i/o buffers of size 2kb (Sectorsize).
 * Unfortunately, reading 2kb at a time often hopping around
 * the disk doesn't let us get near the disk bandwidth.
 *
 * Based on a trace of iobuf address accesses taken while
 * tarring up a Plan 9 distribution CD, we now use 16 128kb
 * buffers.  This works for ISO9660 because data is required
 * to be laid out contiguously; effectively we're doing aggressive
 * readahead.  Because the buffers are so big and the typical
 * disk accesses so concentrated, it's okay that we have so few
 * of them.
 *
 * If this is used to access multiple discs at once, it's not clear
 * how gracefully the scheme degrades, but I'm not convinced
 * it's worth worrying about.		-rsc
 */

/* trying a larger value to get greater throughput - geoff */
#define	BUFPERCLUST	256 /* sectors/cluster; was 64, 64*Sectorsize = 128kb */
#define	NCLUST		16

int nclust = NCLUST;

/* doubly-linked list of clusters; most recently used at iohead */
static Ioclust*	iohead;
static Ioclust*	iotail;

static Ioclust*	getclust(Xdata*, vlong);
static void	putclust(Ioclust*);
static void	xread(Ioclust*);

void
40
iobuf_init(void)
41
{
42
	int i, j, n;
43
	Ioclust *c;
44
	Iobuf *b;
45
	uchar *mem;
46
 
47
	n = nclust*sizeof(Ioclust) +
48
		nclust*BUFPERCLUST*(sizeof(Iobuf)+Sectorsize);
49
	mem = sbrk(n);
50
	if(mem == (void*)-1)
51
		panic(0, "iobuf_init");
52
	memset(mem, 0, n);
53
 
54
	for(i=0; i<nclust; i++){
55
		c = (Ioclust*)mem;
56
		mem += sizeof(Ioclust);
57
		c->addr = -1;
58
		c->prev = iotail;
59
		if(iotail)
60
			iotail->next = c;
61
		iotail = c;
62
		if(iohead == nil)
63
			iohead = c;
64
 
65
		c->buf = (Iobuf*)mem;
66
		mem += BUFPERCLUST*sizeof(Iobuf);
67
		c->iobuf = mem;
68
		mem += BUFPERCLUST*Sectorsize;
69
		for(j=0; j<BUFPERCLUST; j++){
70
			b = &c->buf[j];
71
			b->clust = c;
72
			b->addr = -1;
73
			b->iobuf = c->iobuf+j*Sectorsize;
74
		}
75
	}
76
}
77
 
78
void
79
purgebuf(Xdata *dev)
80
{
81
	Ioclust *p;
82
 
83
	for(p=iohead; p!=nil; p=p->next)
84
		if(p->dev == dev){
85
			p->addr = -1;
86
			p->busy = 0;
87
		}
88
}
89
 
90
static Ioclust*
91
getclust(Xdata *dev, vlong addr)
92
{
93
	Ioclust *c, *f;
94
 
95
	f = nil;
96
	for(c=iohead; c; c=c->next){
97
		if(!c->busy)
98
			f = c;
99
		if(c->addr == addr && c->dev == dev){
100
			c->busy++;
101
			return c;
102
		}
103
	}
104
 
105
	if(f == nil)
106
		panic(0, "out of buffers");
107
 
108
	f->addr = addr;
109
	f->dev = dev;
110
	f->busy++;
111
	if(waserror()){
112
		f->addr = -1;	/* stop caching */
113
		putclust(f);
114
		nexterror();
115
	}
116
	xread(f);
117
	poperror();
118
	return f;
119
}
120
 
121
static void
122
putclust(Ioclust *c)
123
{
124
	if(c->busy <= 0)
125
		panic(0, "putbuf");
126
	c->busy--;
127
 
128
	/* Link onto head for LRU */
129
	if(c == iohead)
130
		return;
131
	c->prev->next = c->next;
132
 
133
	if(c->next)
134
		c->next->prev = c->prev;
135
	else
136
		iotail = c->prev;
137
 
138
	c->prev = nil;
139
	c->next = iohead;
140
	iohead->prev = c;
141
	iohead = c;
142
}
143
 
144
Iobuf*
145
getbuf(Xdata *dev, uvlong addr)
146
{
147
	int off;
148
	Ioclust *c;
149
 
150
	off = addr%BUFPERCLUST;
151
	c = getclust(dev, addr - off);
152
	if(c->nbuf < off){
153
		c->busy--;
154
		error("short read or I/O error");
155
	}
156
	return &c->buf[off];
157
}
158
 
159
void
160
putbuf(Iobuf *b)
161
{
162
	putclust(b->clust);
163
}
164
 
165
static void
166
xread(Ioclust *c)
167
{
168
	int n;
169
	Xdata *dev;
170
 
171
	dev = c->dev;
172
	seek(dev->dev, (vlong)c->addr * Sectorsize, 0);
173
	n = readn(dev->dev, c->iobuf, BUFPERCLUST*Sectorsize);
174
	if(n < Sectorsize)
175
		error("short read or I/O error");
176
	c->nbuf = n/Sectorsize;
177
}