/*
 * Atheros 71xx ethernets for rb450g.
 *
 * all 5 PHYs are accessible only through first ether's register space.
 *
 * TODO:
 *	promiscuous mode.
 *	make ether1 work: probably needs mii/phy initialisation,
 *	maybe needs 8316 switch code too (which requires mdio, phy, etc. glop).
 * to maybe do some day:
 *	dig mac addresses out & config phy/mii via spi or other grot and swill
 *	(instead of editing rb config file).
 */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"io.h"
#include	"../port/error.h"
#include	"../port/netif.h"
#include	"etherif.h"
#include	"ethermii.h"
#include	<pool.h>

enum {
	Ntd	= 64,
	Nrd	= 256,
	Nrb	= 1024,

	Bufalign= 4,
	Rbsz	= ETHERMAXTU + 4,	/* 4 for CRC */
};
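
/*
 * ring and pool sizes: Ntd transmit descriptors, Nrd receive
 * descriptors and a shared pool of Nrb receive Blocks (filled in
 * athattach).  Rbsz is the receive buffer size: a maximum-sized
 * frame plus the 4-byte CRC the MAC appends; with the usual Plan 9
 * ETHERMAXTU of 1514 that is 1518 bytes.
 */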

extern uchar arge0mac[Eaddrlen];	/* see rb config file */
extern uchar arge1mac[Eaddrlen];

typedef struct Arge Arge;
typedef struct Ctlr Ctlr;
typedef struct Desc Desc;
typedef struct Etherif Etherif;

/*
 *  device registers
 */
struct Arge {
	ulong	cfg1;
	ulong	cfg2;
	ulong	ifg;
	ulong	hduplex;
	ulong	maxframelen;
	uchar	_pad0[0x20 - 0x14];

	ulong	miicfg;
	ulong	miicmd;
	ulong	miiaddr;
	ulong	miictl;
	ulong	miists;
	ulong	miiindic;

	ulong	ifctl;
	ulong	_pad1;
	ulong	staaddr1;
	ulong	staaddr2;

	ulong	fifocfg[3];
	ulong	fifotxthresh;
	ulong	fiforxfiltmatch;
	ulong	fiforxfiltmask;
	ulong	fiforam[7];
	uchar	_pad2[0x180 - 0x7c];

	/* dma */
	ulong	txctl;
	ulong	txdesc;
	ulong	txsts;
	ulong	rxctl;
	ulong	rxdesc;
	ulong	rxsts;
	ulong	dmaintr;
	ulong	dmaintrsts;
};
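
/*
 * the pads above put miicfg at offset 0x20, ifctl at 0x38 and the
 * dma block (txctl onwards) at 0x180, matching the sizes of _pad0
 * and _pad2.  the struct is overlaid on a MAC's register window:
 * see etherifs[] and athreset, which map it through KSEG1.
 */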

enum {
	Cfg1softrst		= 1 << 31,
	Cfg1simulrst		= 1 << 30,
	Cfg1macrxblkrst		= 1 << 19,
	Cfg1mactxblkrst		= 1 << 18,
	Cfg1rxfuncrst		= 1 << 17,
	Cfg1txfuncrst		= 1 << 16,
	Cfg1loopback		= 1 <<  8,
	Cfg1rxflowctl		= 1 <<  5,
	Cfg1txflowctl		= 1 <<  4,
	Cfg1syncrx		= 1 <<  3,
	Cfg1rxen		= 1 <<  2,
	Cfg1synctx		= 1 <<  1,
	Cfg1txen		= 1 <<  0,

	Cfg2preamblelenmask	= 0xf,
	Cfg2preamblelenshift	= 12,
	Cfg2ifmode1000		= 2 << 8,
	Cfg2ifmode10_100	= 1 << 8,
	Cfg2ifmodeshift		= 8,
	Cfg2ifmodemask		= 3,
	Cfg2hugeframe		= 1 << 5,
	Cfg2lenfield		= 1 << 4,
	Cfg2enpadcrc		= 1 << 2,
	Cfg2encrc		= 1 << 1,
	Cfg2fdx			= 1 << 0,

	Miicfgrst		= 1 << 31,
	Miicfgscanautoinc	= 1 <<  5,
	Miicfgpreamblesup	= 1 <<  4,
	Miicfgclkselmask	= 0x7,
	Miicfgclkdiv4		= 0,
	Miicfgclkdiv6		= 2,
	Miicfgclkdiv8		= 3,
	Miicfgclkdiv10		= 4,
	Miicfgclkdiv14		= 5,
	Miicfgclkdiv20		= 6,
	Miicfgclkdiv28		= 7,

	Miicmdscancycle		= 1 << 1,
	Miicmdread		= 1,
	Miicmdwrite		= 0,

	Miiphyaddrshift		= 8,
	Miiphyaddrmask		= 0xff,
	Miiregmask		= 0x1f,

	Miictlmask		= 0xffff,

	Miistsmask		= 0xffff,

	Miiindicinvalid		= 1 << 2,
	Miiindicscanning	= 1 << 1,
	Miiindicbusy		= 1 << 0,

	Ifctlspeed		= 1 << 16,

	Fifocfg0txfabric	= 1 << 4,
	Fifocfg0txsys		= 1 << 3,
	Fifocfg0rxfabric	= 1 << 2,
	Fifocfg0rxsys		= 1 << 1,
	Fifocfg0watermark	= 1 << 0,
	Fifocfg0all		= MASK(5),
	Fifocfg0enshift		= 8,

	/*
	 * these flags applicable both to filter mask and to filter match.
	 * `Ff' is for `fifo filter'.
	 */
	Ffunicast		= 1 << 17,
	Fftruncframe		= 1 << 16,
	Ffvlantag		= 1 << 15,
	Ffunsupopcode		= 1 << 14,
	Ffpauseframe		= 1 << 13,
	Ffctlframe		= 1 << 12,
	Fflongevent		= 1 << 11,
	Ffdribblenibble		= 1 << 10,
	Ffbcast			= 1 <<  9,
	Ffmcast			= 1 <<  8,
	Ffok			= 1 <<  7,
	Ffoorange		= 1 <<  6,
	Fflenmsmtch		= 1 <<  5,
	Ffcrcerr		= 1 <<  4,
	Ffcodeerr		= 1 <<  3,
	Fffalsecarrier		= 1 <<  2,
	Ffrxdvevent		= 1 <<  1,
	Ffdropevent		= 1 <<  0,
	/*
	 * exclude unicast and truncated frames from matching.
	 */
	Ffmatchdflt = Ffvlantag | Ffunsupopcode | Ffpauseframe | Ffctlframe |
		Fflongevent | Ffdribblenibble | Ffbcast | Ffmcast | Ffok |
		Ffoorange | Fflenmsmtch | Ffcrcerr | Ffcodeerr |
		Fffalsecarrier | Ffrxdvevent | Ffdropevent,

	/* `Frm' is for `fifo receive mask'. */
	Frmbytemode		= 1 << 19,
	Frmnoshortframe		= 1 << 18,
	Frmbit17		= 1 << 17,
	Frmbit16		= 1 << 16,
	Frmtruncframe		= 1 << 15,
	Frmlongevent		= 1 << 14,
	Frmvlantag		= 1 << 13,
	Frmunsupopcode		= 1 << 12,
	Frmpauseframe		= 1 << 11,
	Frmctlframe		= 1 << 10,
	Frmdribblenibble	= 1 <<  9,
	Frmbcast		= 1 <<  8,
	Frmmcast		= 1 <<  7,
	Frmok			= 1 <<  6,
	Frmoorange		= 1 <<  5,
	Frmlenmsmtch		= 1 <<  4,
	Frmcodeerr		= 1 <<  3,
	Frmfalsecarrier		= 1 <<  2,
	Frmrxdvevent		= 1 <<  1,
	Frmdropevent		= 1 <<  0,
	/*
	 *  len. mismatch, unsupp. opcode and short frame bits excluded
	 */
	Ffmaskdflt = Frmnoshortframe | Frmbit17 | Frmbit16 | Frmtruncframe |
		Frmlongevent | Frmvlantag | Frmpauseframe | Frmctlframe |
		Frmdribblenibble | Frmbcast | Frmmcast | Frmok | Frmoorange |
		Frmcodeerr | Frmfalsecarrier | Frmrxdvevent | Frmdropevent,

	Dmatxctlen	= 1 << 0,

	/* dma tx status */
	Txpcountmask	= 0xff,
	Txpcountshift	= 16,
	Txbuserr	= 1 << 3,
	Txunderrun	= 1 << 1,
	Txpktsent	= 1 << 0,

	Dmarxctlen	= 1 << 0,

	/* dma rx status */
	Rxpcountmask	= 0xff,
	Rxpcountshift	= 16,
	Rxbuserr	= 1 << 3,
	Rxovflo		= 1 << 2,
	Rxpktrcvd	= 1 << 0,

	/* dmaintr & dmaintrsts bits */
	Dmarxbuserr	= 1 << 7,
	Dmarxovflo	= 1 << 6,
	Dmarxpktrcvd	= 1 << 4,
	Dmatxbuserr	= 1 << 3,
	Dmatxunderrun	= 1 << 1,
	Dmatxpktsent	= 1 << 0,
	/* we don't really need most tx interrupts */
	Dmaall		= Dmarxbuserr | Dmarxovflo | Dmarxpktrcvd | Dmatxbuserr,

	Spictlremapdisable	= 1 << 6,
	Spictlclkdividermask	= MASK(6),

	Spiioctlcs2		= 1 << 18,
	Spiioctlcs1		= 1 << 17,
	Spiioctlcs0		= 1 << 16,
	Spiioctlcsmask		= 7 << 16,
	Spiioctlclk		= 1 << 8,
	Spiioctldo		= 1,
};

struct Spi {			/* at 0x1f000000 */
	ulong	fs;
	ulong	ctl;
	ulong	ioctl;
	ulong	rds;
};

/* hw descriptors of buffer rings (rx and tx), need to be uncached */
struct Desc {
	ulong	addr;		/* of packet buffer */
	ulong	ctl;
	Desc	*next;
	ulong	_pad;
};

enum {
	Descempty	= 1 << 31,
	Descmore	= 1 << 24,
	Descszmask	= MASK(12),
};
#define DMASIZE(len)	((len) & Descszmask)
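
/*
 * descriptor ownership, as used below: on the rx ring, rxnewbuf sets
 * Descempty (plus the buffer size) and the MAC clears it once it has
 * written a packet into the buffer, which is what rproc waits for;
 * on the tx ring, tproc queues a frame by writing ctl without
 * Descempty and the MAC sets Descempty when the frame has been sent,
 * which is what txreclaim waits for.  the low 12 bits of ctl carry
 * the dma length (DMASIZE).
 */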

struct Ctlr {
	Arge	*regs;
	Ether*	edev;			/* backward pointer */

	Lock;				/* attach */
	int	init;
	int	attached;

	Mii*	mii;
	Rendez	lrendez;
	int	lim;
	int	link;
	int	phymask;

	/* receiver */
	Rendez	rrendez;
	uint	rintr;			/* count */
	int	pktstoread;		/* flag */
	int	discard;
	/* rx descriptors */
	Desc*	rdba;			/* base address */
	Block**	rd;
	uint	rdh;			/* head */
	uint	rdt;			/* tail */
	uint	nrdfree;		/* rd's awaiting pkts (sort of) */

	/* transmitter */
	Rendez	trendez;
	uint	tintr;			/* count */
	int	pktstosend;		/* flag */
	int	ntq;
	/* tx descriptors */
	Desc*	tdba;			/* base address */
	Block**	td;
	uint	tdh;			/* head */
	uint	tdt;			/* tail */
};

struct Etherif {
	uintptr	regs;
	int	irq;
	uchar	*mac;
	int	phymask;
};

static Etherif etherifs[] = {
	{ 0x1a000000, ILenet0, arge0mac, 1<<4 },
	{ 0x19000000, ILenet1, arge1mac, MASK(4) },
};
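
/*
 * presumably: the first interface (regs at 0x1a000000) talks to PHY 4
 * on its own, hence 1<<4; the second (regs at 0x19000000) sits behind
 * the ar8316 switch, whose PHYs are 0-3, hence MASK(4).  this matches
 * the note at the top that all 5 PHYs are reachable only through the
 * first ether's register space.
 */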

static Ether *etherxx[MaxEther];
static Lock athrblock;		/* free receive Blocks */
static Block* athrbpool;	/* receive Blocks for all ath controllers */

static void	athrbfree(Block* bp);

/*
 * ar8316 ether switch
 */

enum {
	Swrgmii	= 0,
	Swgmii	= 1,
	Swphy4cpu = 0, /* flag: port 4 connected to CPU (not internal switch) */
};

typedef struct Switch Switch;
struct Switch {
	int	page;
	int	scdev;
};

enum {
	/* atheros-specific mii registers */
	Miiathdbgaddr	= 0x1d,
	Miiathdbgdata	= 0x1e,

	Swregmask	= 0,
		Swmaskrevmask	= 0x00ff,
		Swmaskvermask	= 0xff00,
		Swmaskvershift	= 8,
		Swmasksoftreset	= 1 << 31,

	Swregmode	= 8,
		Swdir615uboot	= 0x8d1003e0,
		/* from ubiquiti rspro */
		Swrgmiiport4iso	= 0x81461bea,
		Swrgmiiport4sw	= 0x01261be2,
		/* avm fritz!box 7390 */
		Swgmiiavm	= 0x010e5b71,

		Swmac0gmiien	= 1 <<  0,
		Swmac0rgmiien	= 1 <<  1,
		Swphy4gmiien	= 1 <<  2,
		Swphy4rgmiien	= 1 <<  3,
		Swmac0macmode	= 1 <<  4,
		Swrgmiirxclkdelayen= 1 <<  6,
		Swrgmiitxclkdelayen= 1 <<  7,
		Swmac5macmode	= 1 << 14,
		Swmac5phymode	= 1 << 15,
		Swtxdelays0	= 1 << 21,
		Swtxdelays1	= 1 << 22,
		Swrxdelays0	= 1 << 23,
		Swledopenen	= 1 << 24,
		Swspien		= 1 << 25,
		Swrxdelays1	= 1 << 26,
		Swpoweronsel	= 1 << 31,

	Swregfloodmask	= 0x2c,
		Swfloodmaskbcast2cpu= 1 << 26,

	Swregglobal	= 0x30,
		Swglobalmtumask	= 0x7fff,
};

#ifdef NOTYET
void *
devicegetparent(int)
{
	static int glop;

	return &glop;
}

static void
arswsplitsetpage(int dev, ulong addr, ushort *phy, ushort *reg)
{
	static Switch ar8316;
	Switch *sc = &ar8316;
	ushort page;

	page = ((addr) >> 9) & 0xffff;
	*phy = (((addr) >> 6) & 0x7) | 0x10;
	*reg = ((addr) >> 1) & 0x1f;
	MDIOWRREG(devicegetparent(dev), 0x18, 0, page);
	sc->page = page;
}
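
/*
 * worked example of the split: for the switch register at address
 * 0x2c (Swregfloodmask), page = 0x2c>>9 = 0, phy = ((0x2c>>6)&7)|0x10
 * = 0x10 and reg = (0x2c>>1)&0x1f = 0x16, so its low half is reached
 * as mii register 0x16 of pseudo-phy 0x10 after the page is written
 * to pseudo-phy 0x18, register 0.
 */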

/*
 * Read half a register.  Some of the registers define control bits, and
 * the sequence of half-word accesses matters.  The register addresses
 * are word-even (mod 4).
 */
static int
arswrdreg16(int dev, int addr)
{
	ushort phy, reg;

	arswsplitsetpage(dev, addr, &phy, &reg);
	return MDIORDREG(devicegetparent(dev), phy, reg);
}

void
arswwritedbg(int dev, int phy, ushort dbgaddr, ushort dbgdata)
{
	MDIOWRREG(devicegetparent(dev), phy, Miiathdbgaddr, dbgaddr);
	MDIOWRREG(devicegetparent(dev), phy, Miiathdbgdata, dbgdata);
}

/*
 * Write half a register
 */
static inline int
arswwrreg16(int dev, int addr, int data)
{
	ushort phy, reg;

	arswsplitsetpage(dev, addr, &phy, &reg);
	return MDIOWRREG(devicegetparent(dev), phy, reg, data);
}

/* arsw??reglsb routines operate on lower 16 bits; *msb on upper ones */

int
arswrdreg(int dev, int addr)
{
	return arswrdreglsb(dev, addr) | arswrdregmsb(dev, addr);
}

int
arswwrreg(int dev, int addr, int value)
{
	arswwrreglsb(dev, addr, value);		/* XXX check this write too? */
	return arswwrregmsb(dev, addr, value);
}

int
arswmodifyreg(int dev, int addr, int mask, int set)
{
	return arswwrreg(dev, addr, (arswrdreg(dev, addr) & ~mask) | set);
}

/*
 * initialise the switch
 */
static int
ar8316init(Switch *sc)
{
	if (Swrgmii && Swphy4cpu) {
		arswwrreg(sc->scdev, Swregmode, Swrgmiiport4iso);
		iprint("ar8316: MAC port == RGMII, port 4 = dedicated PHY\n");
	} else if (Swrgmii) {
		arswwrreg(sc->scdev, Swregmode, Swrgmiiport4sw);
		iprint("ar8316: MAC port == RGMII, port 4 = switch port\n");
	} else if (Swgmii) {
		arswwrreg(sc->scdev, Swregmode, Swgmiiavm);
		iprint("ar8316: MAC port == GMII\n");
	} else {
		iprint("ar8316: unknown switch PHY config\n");
		return -1;
	}

	delay(1);			/* wait for things to settle */

	if (Swrgmii && Swphy4cpu) {
		iprint("ar8316: port 4 RGMII hack\n");

		/* work around for phy4 rgmii mode */
		arswwritedbg(sc->scdev, 4, 0x12, 0x480c);
		arswwritedbg(sc->scdev, 4, 0x0, 0x824e);	/* rx delay */
		arswwritedbg(sc->scdev, 4, 0x5, 0x3d47);	/* tx delay */
		delay(1);		/* again to let things settle */
	}
	arswwrreg(sc->scdev, 0x38, 0xc000050e);	/* mystery */

	/*
	 * Flood address table misses to all ports, and enable forwarding of
	 * broadcasts to the cpu port.
	 */
	arswwrreg(sc->scdev, Swregfloodmask, Swfloodmaskbcast2cpu | 0x003f003f);
	arswmodifyreg(sc->scdev, Swregglobal, Swglobalmtumask, ETHERMAXTU+8+2);
	return 0;
}
#endif			/* NOTYET */
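
/*
 * the NOTYET code above still expects mdio glue that is not in this
 * file: MDIORDREG/MDIOWRREG and the arsw{rd,wr}reg{lsb,msb} halves of
 * the 32-bit register accessors (the "mdio, phy, etc. glop" mentioned
 * in the TODO at the top).
 */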

static long
ifstat(Ether* edev, void* a, long n, ulong offset)
{
	int l, i, r;
	char *p;
	Ctlr *ctlr;

	ctlr = edev->ctlr;
	p = malloc(READSTR);
	if(p == nil)
		error(Enomem);
	l = 0;
	l += snprint(p+l, READSTR-l, "tintr: %ud\n", ctlr->tintr);
	l += snprint(p+l, READSTR-l, "rintr: %ud\n", ctlr->rintr);
	l += snprint(p+l, READSTR-l, "discarded: %ud\n", ctlr->discard);

	if(ctlr->mii != nil && ctlr->mii->curphy != nil){
		l += snprint(p+l, READSTR-l, "phy:   ");
		for(i = 0; i < NMiiPhyr; i++){
			if(i && ((i & 0x07) == 0))
				l += snprint(p+l, READSTR-l, "\n       ");
			r = miimir(ctlr->mii, i);
			l += snprint(p+l, READSTR-l, " %4.4uX", r);
		}
		snprint(p+l, READSTR-l, "\n");
	}
	n = readstr(offset, a, n, p);
	free(p);

	return n;
}

static void
etherrtrace(Netfile* f, Etherpkt* pkt, int len)
{
	int i, n;
	Block *bp;

	if(qwindow(f->in) <= 0)
		return;
	if(len > 58)
		n = 58;
	else
		n = len;
	bp = iallocb(64);
	if(bp == nil)
		return;
	memmove(bp->wp, pkt->d, n);
	i = TK2MS(MACHP(0)->ticks);
	bp->wp[58] = len>>8;
	bp->wp[59] = len;
	bp->wp[60] = i>>24;
	bp->wp[61] = i>>16;
	bp->wp[62] = i>>8;
	bp->wp[63] = i;
	bp->wp += 64;
	qpass(f->in, bp);
}
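
/*
 * the trace record passed to snoopers is a fixed 64-byte block: the
 * first 58 bytes of the frame, the original length as a 2-byte
 * big-endian value at offsets 58-59, and the time in ms as a 4-byte
 * big-endian value at offsets 60-63.
 */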

Block*
etheriq(Ether* ether, Block* bp, int fromwire)
{
	Etherpkt *pkt;
	ushort type;
	int len, multi, tome, fromme;
	Netfile **ep, *f, **fp, *fx;
	Block *xbp;
	Ctlr *ctlr;

	ether->inpackets++;
	ctlr = ether->ctlr;

	pkt = (Etherpkt*)bp->rp;
	len = BLEN(bp);
	type = (pkt->type[0]<<8)|pkt->type[1];
	fx = 0;
	ep = &ether->f[Ntypes];

	multi = pkt->d[0] & 1;
	/* check for valid multicast addresses */
	if(multi && memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) != 0 &&
	    ether->prom == 0)
		if(!activemulti(ether, pkt->d, sizeof(pkt->d))){
			if(fromwire){
				ctlr->discard++;
				freeb(bp);
				bp = 0;
			}
			return bp;
		}

	/* is it for me? */
	tome   = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
	fromme = memcmp(pkt->s, ether->ea, sizeof(pkt->s)) == 0;

	/*
	 * Multiplex the packet to all the connections which want it.
	 * If the packet is not to be used subsequently (fromwire != 0),
	 * attempt to simply pass it into one of the connections, thereby
	 * saving a copy of the data (usual case hopefully).
	 */
	for(fp = ether->f; fp < ep; fp++)
		if((f = *fp) != nil && (f->type == type || f->type < 0))
		if(tome || multi || f->prom)
			/* Don't want to hear bridged packets */
			if(f->bridge && !fromwire && !fromme)
				continue;
			else if(f->headersonly)
				etherrtrace(f, pkt, len);
			else if(fromwire && fx == 0)
				fx = f;
			else if(xbp = iallocb(len)){
				memmove(xbp->wp, pkt, len);
				xbp->wp += len;
				if(qpass(f->in, xbp) < 0){
					iprint("soverflow for f->in\n");
					ether->soverflows++;
				}
			}else{
				iprint("soverflow iallocb\n");
				ether->soverflows++;
			}
	if(fx){
		if(qpass(fx->in, bp) < 0){
			iprint("soverflow for fx->in\n");
			ether->soverflows++;
		}
		return 0;
	}
	if(fromwire){
		ctlr->discard++;
		freeb(bp);
		return 0;
	}
	return bp;
}
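
/*
 * convention: with fromwire set, etheriq always consumes the Block
 * (queueing it on some connection or freeing it) and returns nil,
 * which rproc asserts; with fromwire clear the Block is only copied
 * and is handed back to the caller, as etheroq relies on for
 * looped-back packets below.
 */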

static void
athhwreset(Ether *ether)
{
	Ctlr *ctlr;
	Arge *arge;

	ctlr = ether->ctlr;
	if (ctlr == nil)
		return;
	arge = ctlr->regs;
	if (arge == nil)
		return;

	arge->dmaintr = 0;

	arge->rxctl = 0;
	arge->txctl = 0;
	coherence();

	/*
	 * give tx & rx time to stop, otherwise clearing desc registers
	 * too early will cause random memory corruption.
	 */
	delay(1);

	arge->rxdesc = 0;
	arge->txdesc = 0;
	coherence();

	/* clear all interrupts */
	while (arge->rxsts & Rxpktrcvd)
		arge->rxsts = Rxpktrcvd;
	while (arge->txsts & Txpktsent)
		arge->txsts = Txpktsent;

	/* and errors */
	arge->rxsts = Rxbuserr | Rxovflo;
	arge->txsts = Txbuserr | Txunderrun;
}

static void
txreclaim(Ctlr *ctlr)
{
	uint tdh;
	Arge *arge;
	Block *bp;

	arge = ctlr->regs;
	tdh = ctlr->tdh;
	while (tdh != ctlr->tdt && ctlr->tdba[tdh].ctl & Descempty){
		arge->txsts = Txpktsent;

		bp = ctlr->td[tdh];
		ctlr->td[tdh] = nil;
		if (bp)
			freeb(bp);

		ctlr->tdba[tdh].addr = 0;
		ctlr->ntq--;
		tdh = NEXT(tdh, Ntd);
	}
	ctlr->tdh = tdh;
}

static Block*
athrballoc(void)
{
	Block *bp;

	ilock(&athrblock);
	if((bp = athrbpool) != nil){
		athrbpool = bp->next;
		bp->next = nil;
		_xinc(&bp->ref);	/* prevent bp from being freed */
	}
	iunlock(&athrblock);
	return bp;
}

static void
athrbfree(Block* bp)
{
	bp->wp = bp->rp = bp->lim - ROUND(Rbsz, BLOCKALIGN);
	bp->flag &= ~(Bipck | Budpck | Btcpck | Bpktck);

	ilock(&athrblock);
	bp->next = athrbpool;
	athrbpool = bp;
	iunlock(&athrblock);
}
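
/*
 * receive Blocks are recycled rather than returned to the allocator:
 * athattach allocates Nrb Blocks, points their free routine at
 * athrbfree and frees them, so they land in athrbpool.  athrballoc
 * raises the reference count so that a later freeb (from rproc or
 * etheriq) goes through athrbfree again and puts the Block back in
 * the pool instead of destroying it.
 */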

static void
rxnewbuf(Ctlr *ctlr, int i)
{
	Block *bp;
	Desc *rd;

	if (ctlr->rd[i] != nil)
		return;
	ctlr->rd[i] = bp = athrballoc();
	if(bp == nil)
		panic("#l%d: can't allocate receive buffer",
			ctlr->edev->ctlrno);
	dcflush(bp->rp, Rbsz);		/* writeback & invalidate */

	rd = &ctlr->rdba[i];
	rd->addr = PADDR(bp->rp);
	rd->ctl = Descempty | DMASIZE(Rbsz);
	ctlr->nrdfree++;
}

static void
rxreclaim(Ctlr *ctlr)
{
	uint rdt;

	rdt = ctlr->rdt;
	while (rdt != ctlr->rdh && !(ctlr->rdba[rdt].ctl & Descempty)){
		rxnewbuf(ctlr, rdt);
		rdt = NEXT(rdt, Nrd);
	}
	ctlr->rdt = rdt;
}

static void
etherintr(void *arg)
{
	int sts;
	Arge *arge;
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	arge = ctlr->regs;
	ilock(ctlr);
	sts = arge->dmaintrsts;
	if (sts & Dmarxpktrcvd) {
		arge->dmaintr &= ~Dmarxpktrcvd;
		ctlr->pktstoread = 1;
		wakeup(&ctlr->rrendez);
		ctlr->rintr++;
		sts &= ~Dmarxpktrcvd;
	}
	if (sts & (Dmatxpktsent | Dmatxunderrun)) {
		arge->dmaintr &= ~(Dmatxpktsent | Dmatxunderrun);
		ctlr->pktstosend = 1;
		wakeup(&ctlr->trendez);
		ctlr->tintr++;
		sts &= ~(Dmatxpktsent | Dmatxunderrun);
	}
	iunlock(ctlr);
	if (sts)
		iprint("#l%d: sts %#ux\n", ether->ctlrno, sts);
}
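
/*
 * interrupt strategy: the handler does no i/o itself; it masks the
 * offending bits in dmaintr, notes the event and wakes rproc or
 * tproc, which drain the rings and turn the relevant dmaintr bits
 * back on (under ilock) before sleeping again.
 */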

static int
pktstoread(void* v)
{
	Ctlr *ctlr = v;

	return ctlr->pktstoread || !(ctlr->rdba[ctlr->rdh].ctl & Descempty);
}

static void
rproc(void* arg)
{
	uint rdh, sz;
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;
	Desc *rd;
	Ether *edev;

	edev = arg;
	ctlr = edev->ctlr;
	arge = ctlr->regs;
	for(;;){
		/* wait for next interrupt */
		ilock(ctlr);
		arge->dmaintr |= Dmarxpktrcvd;
		iunlock(ctlr);

		sleep(&ctlr->rrendez, pktstoread, ctlr);
		ctlr->pktstoread = 0;

		rxreclaim(ctlr);
		rdh = ctlr->rdh;
		for (rd = &ctlr->rdba[rdh]; !(rd->ctl & Descempty);
		     rd = &ctlr->rdba[rdh]){
			bp = ctlr->rd[rdh];
			assert(bp != nil);
			ctlr->rd[rdh] = nil;

			/* omit final 4 bytes (crc), pass pkt upstream */
			sz = DMASIZE(rd->ctl) - 4;
			assert(sz > 0 && sz <= Rbsz);
			bp->wp = bp->rp + sz;
			bp = etheriq(edev, bp, 1);
			assert(bp == nil);		/* Block was consumed */

			arge->rxsts = Rxpktrcvd;

			ctlr->nrdfree--;
			rdh = NEXT(rdh, Nrd);
			if(ctlr->nrdfree < Nrd/2) {
				/* rxreclaim reads ctlr->rdh */
				ctlr->rdh = rdh;
				rxreclaim(edev->ctlr);
			}
		}
		ctlr->rdh = rdh;
	}
}

static int
pktstosend(void* v)
{
	Ether *edev = v;
	Ctlr *ctlr = edev->ctlr;

	return ctlr->pktstosend || ctlr->ntq > 0 || qlen(edev->oq) > 0;
}

static void
tproc(void* arg)
{
	uint tdt, added;
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;
	Desc *td;
	Ether *edev;

	edev = arg;
	ctlr = edev->ctlr;
	arge = ctlr->regs;
	for(;;){
		/* wait for next free buffer and output queue block */
		sleep(&ctlr->trendez, pktstosend, edev);
		ctlr->pktstosend = 0;

		txreclaim(ctlr);

		/* copy as much of my output q as possible into output ring */
		added = 0;
		tdt = ctlr->tdt;
		while(ctlr->ntq < Ntd - 1){
			td = &ctlr->tdba[tdt];
			if (!(td->ctl & Descempty))
				break;
			bp = qget(edev->oq);
			if(bp == nil)
				break;

			/* make sure the whole packet is in ram */
			dcflush(bp->rp, BLEN(bp));

			/*
			 * Give ownership of the descriptor to the chip,
			 * increment the software ring descriptor pointer.
			 */
			ctlr->td[tdt] = bp;
			td->addr = PADDR(bp->rp);
			td->ctl = DMASIZE(BLEN(bp));
			coherence();

			added++;
			ctlr->ntq++;
			tdt = NEXT(tdt, Ntd);
		}
		ctlr->tdt = tdt;
		/*
		 * Underrun turns off TX.  Clear underrun indication.
		 * If there's anything left in the ring, reactivate the tx.
		 */
		if (arge->dmaintrsts & Dmatxunderrun)
			arge->txsts = Txunderrun;
		if(1 || added)
			arge->txctl = Dmatxctlen;	/* kick xmiter */
		ilock(ctlr);
		if(ctlr->ntq >= Ntd/2)			/* tx ring half-full? */
			arge->dmaintr |= Dmatxpktsent;
		else if (ctlr->ntq > 0)
			arge->dmaintr |= Dmatxunderrun;
		iunlock(ctlr);
		txreclaim(ctlr);
	}
}

/*
 *  turn promiscuous mode on/off
 */
static void
promiscuous(void *ve, int on)
{
	USED(ve, on);
}

static void
multicast(void *ve, uchar*, int on)
{
	USED(ve, on);
}

static void
linkdescs(Desc *base, int ndesc)
{
	int i;

	for(i = 0; i < ndesc - 1; i++)
		base[i].next = (Desc *)PADDR(&base[i+1]);
	base[ndesc - 1].next = (Desc *)PADDR(&base[0]);
}
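
/*
 * the ring is circular: each descriptor's next field holds the
 * physical address of its successor (the MAC's dma engine follows
 * these, so virtual addresses won't do) and the last points back to
 * the first.  ringinit maps the descriptor arrays through KSEG1 so
 * the cpu sees them uncached.
 */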

/*
 * Initialise the receive and transmit buffer rings.
 *
 * This routine is protected by ctlr->init.
 */
static void
ringinit(Ctlr* ctlr)
{
	int i;
	void *v;

	if(ctlr->rdba == 0){
		v = xspanalloc(Nrd * sizeof(Desc), CACHELINESZ, 0);
		assert(v);
		ctlr->rdba = (Desc *)KSEG1ADDR(v);
		ctlr->rd = xspanalloc(Nrd * sizeof(Block *), 0, 0);
		assert(ctlr->rd != nil);
		linkdescs(ctlr->rdba, Nrd);
		for(i = 0; i < Nrd; i++)
			rxnewbuf(ctlr, i);
	}
	ctlr->rdt = ctlr->rdh = 0;

	if(ctlr->tdba == 0) {
		v = xspanalloc(Ntd * sizeof(Desc), CACHELINESZ, 0);
		assert(v);
		ctlr->tdba = (Desc *)KSEG1ADDR(v);
		ctlr->td = xspanalloc(Ntd * sizeof(Block *), 0, 0);
		assert(ctlr->td != nil);
	}
	memset(ctlr->td, 0, Ntd * sizeof(Block *));

	linkdescs(ctlr->tdba, Ntd);
	for(i = 0; i < Ntd; i++)
		ctlr->tdba[i].ctl = Descempty;

	ctlr->tdh = ctlr->tdt = 0;
}

static void
cfgmediaduplex(Ether *ether)
{
	Arge *arge, *arge0;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	arge = ctlr->regs;
	arge->cfg2 = (arge->cfg2 & ~Cfg2ifmode10_100) | Cfg2ifmode1000 | Cfg2fdx;
	arge->ifctl &= ~Ifctlspeed;
	arge->fiforxfiltmask |= Frmbytemode;
	arge->fifotxthresh = 0x008001ff;	/* undocumented magic */

	if (ether->ctlrno > 0) {
		/* set PLL registers: copy from arge0 */
		arge0 = (Arge *)(KSEG1 | etherifs[0].regs);
		USED(arge0);
	}
}
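
/*
 * note that the pll-register copy from the first MAC hinted at above
 * is not actually done yet: arge0 is computed and then only USED().
 * this looks like part of the unfinished ether1 bring-up mentioned in
 * the TODO at the top.
 */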

static void
athmii(Ether *ether, int phymask)
{
	USED(ether, phymask);
}

static void
athcfg(Ether *ether, int phymask)
{
	uchar *eaddr;
	Arge *arge;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	arge = ctlr->regs;
	if(ether->ctlrno > 0){
		if(0){
			/* doing this seems to disable both ethers */
			arge->cfg1 |= Cfg1softrst;		/* stop */
			delay(20);
			*Reset |= Rstge1mac;
			delay(100);
		}
		*Reset &= ~Rstge1mac;
		delay(200);
	}

	/* configure */
	arge->cfg1 = Cfg1syncrx | Cfg1rxen | Cfg1synctx | Cfg1txen;
	arge->cfg2 |= Cfg2enpadcrc | Cfg2lenfield | Cfg2encrc;
	arge->maxframelen = Rbsz;

	if(ether->ctlrno > 0){
		arge->miicfg = Miicfgrst;
		delay(100);
		arge->miicfg = Miicfgclkdiv28;
		delay(100);
	}

	/*
	 * set the station address registers from ether->ea, which
	 * athreset copied from arge0mac/arge1mac (the rb config file).
	 */
	eaddr = ether->ea;
	arge->staaddr1 = eaddr[2]<<24 | eaddr[3]<<16 | eaddr[4]<<8  | eaddr[5];
	arge->staaddr2 = eaddr[0]<< 8 | eaddr[1];

	arge->fifocfg[0] = Fifocfg0all << Fifocfg0enshift; /* undocumented magic */
	arge->fifocfg[1] = 0x0fff0000;	/* undocumented magic */
	arge->fifocfg[2] = 0x00001fff;	/* undocumented magic */

	arge->fiforxfiltmatch = Ffmatchdflt;
	arge->fiforxfiltmask  = Ffmaskdflt;

	/* phy goo */
	athmii(ether, phymask);
	if (ether->ctlrno > 0)
		cfgmediaduplex(ether);
}

static int
athattach(Ether *ether)
{
	int i;
	char name[32];
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	if (ctlr->attached)
		return -1;
	ilock(ctlr);
	ctlr->init = 1;
	for(i = 0; i < Nrb; i++){
		if((bp = allocb(Rbsz + Bufalign)) == nil)
			error(Enomem);
		bp->free = athrbfree;
		freeb(bp);
	}
	ringinit(ctlr);
	ctlr->init = 0;
	iunlock(ctlr);

	athcfg(ether, ctlr->phymask);

	/* start */
	arge = ctlr->regs;
	arge->txdesc = PADDR(ctlr->tdba);
	arge->rxdesc = PADDR(ctlr->rdba);
	coherence();
	arge->rxctl = Dmarxctlen;

	snprint(name, KNAMELEN, "#l%drproc", ether->ctlrno);
	kproc(name, rproc, ether);

	snprint(name, KNAMELEN, "#l%dtproc", ether->ctlrno);
	kproc(name, tproc, ether);

	ilock(ctlr);
	arge->dmaintr |= Dmaall;
	iunlock(ctlr);

	ctlr->attached = 1;
	return 0;
}

/*
 * strategy: RouterBOOT has initialised arge0, try to leave it alone.
 * copy arge0 registers to arge1, with a few exceptions.
 */
static int
athreset(Ether *ether)
{
	Arge *arge;
	Ctlr *ctlr;
	Etherif *ep;

	if (ether->ctlrno < 0 || ether->ctlrno >= MaxEther)
		return -1;
	if (ether->ctlr == nil) {
		/*
		 * Allocate a controller structure and start to initialise it.
		 */
		ether->ctlr = ctlr = malloc(sizeof(Ctlr));
		if (ctlr == nil)
			return -1;
		ctlr->edev = ether;
		ep = etherifs + ether->ctlrno;
		ctlr->regs = arge = (Arge *)(KSEG1 | ep->regs);
		ctlr->phymask = ep->phymask;

		ether->port = (uint)arge;
		ether->irq = ep->irq;
		memmove(ether->ea, ep->mac, Eaddrlen);
		ether->ifstat = ifstat;
		ether->promiscuous = promiscuous;
		ether->multicast = multicast;
		ether->arg = ether;
	}
	athhwreset(ether);
	return 0;
}
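
/*
 * division of labour: athreset is called for each interface at boot
 * (from etherprobe below); it fills in the Ether and Ctlr structures
 * and quiesces the MAC via athhwreset.  athattach, reached through
 * etherattach when the device is first used, allocates the buffer
 * pool and rings, starts the rproc and tproc kernel processes and
 * enables dma and interrupts.
 */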

static Ether*
etherprobe(int ctlrno)
{
	int i, lg;
	ulong mb, bsz;
	Ether *ether;
	char buf[128], name[32];

	ether = malloc(sizeof(Ether));
	if(ether == nil)
		error(Enomem);
	memset(ether, 0, sizeof(Ether));
	ether->ctlrno = ctlrno;
	ether->tbdf = BUSUNKNOWN;
	ether->mbps = 1000;
	ether->minmtu = ETHERMINTU;
	ether->maxmtu = ETHERMAXTU;
	ether->mtu = ETHERMAXTU;

	if(ctlrno >= MaxEther || athreset(ether) < 0){
		free(ether);
		return nil;
	}

	snprint(name, sizeof(name), "ether%d", ctlrno);

	/*
	 * If ether->irq is < 0, it is a hack to indicate that no
	 * interrupt is used (as for ethersink).  Enabling the interrupt
	 * apparently has to be done here and cannot be deferred until
	 * attach.
	 */
	if(ether->irq >= 0)
		intrenable(ether->irq, etherintr, ether);

	i = sprint(buf, "#l%d: atheros71xx: ", ctlrno);
	if(ether->mbps >= 1000)
		i += sprint(buf+i, "%dGbps", ether->mbps/1000);
	else
		i += sprint(buf+i, "%dMbps", ether->mbps);
	i += sprint(buf+i, " port %#luX irq %d", PADDR(ether->port), ether->irq);
	i += sprint(buf+i, ": %2.2ux%2.2ux%2.2ux%2.2ux%2.2ux%2.2ux",
		ether->ea[0], ether->ea[1], ether->ea[2],
		ether->ea[3], ether->ea[4], ether->ea[5]);
	sprint(buf+i, "\n");
	print(buf);

	/*
	 * input queues are allocated by ../port/netif.c:/^openfile.
	 * the size will be the last argument to netifinit() below.
	 *
	 * output queues should be small, to minimise `bufferbloat',
	 * which confuses tcp's feedback loop.  at 1Gb/s, it only takes
	 * ~15µs to transmit a full-sized non-jumbo packet.
	 */

	/* compute log10(ether->mbps) into lg */
	for(lg = 0, mb = ether->mbps; mb >= 10; lg++)
		mb /= 10;
	if (lg > 13)			/* sanity cap; 2**(13+16) = 2²⁹ */
		lg = 13;

	/* allocate larger input queues for higher-speed interfaces */
	bsz = 1UL << (lg + 16);		/* 2¹⁶ = 64K, bsz = 2ⁿ × 64K */
	while (bsz > mainmem->maxsize / 8 && bsz > 128*1024)	/* sanity */
		bsz /= 2;
	netifinit(ether, name, Ntypes, bsz);

	if(ether->oq == nil)
		ether->oq = qopen(1 << (lg + 13), Qmsg, 0, 0);
	if(ether->oq == nil)
		panic("etherreset %s: can't allocate output queue", name);
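
	/*
	 * worked example: with ether->mbps = 1000, lg ends up as 3, so
	 * the input-queue limit starts at 2^(3+16) = 512K (possibly
	 * halved by the sanity loop above, but not below 128K) and the
	 * output queue is opened with a limit of 2^(3+13) = 64K.
	 */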

	ether->alen = Eaddrlen;
	memmove(ether->addr, ether->ea, Eaddrlen);
	memset(ether->bcast, 0xFF, Eaddrlen);
	return ether;
}

static void
etherreset(void)
{
	int ctlrno;

	for(ctlrno = 0; ctlrno < MaxEther; ctlrno++)
		etherxx[ctlrno] = etherprobe(ctlrno);
}

static void
ethershutdown(void)
{
	Ether *ether;
	int i;

	for(i = 0; i < MaxEther; i++){
		ether = etherxx[i];
		if(ether)
			athhwreset(ether);
	}
}

static Chan *
etherattach(char* spec)
{
	ulong ctlrno;
	char *p;
	Chan *chan;

	ctlrno = 0;
	if(spec && *spec){
		ctlrno = strtoul(spec, &p, 0);
		if((ctlrno == 0 && p == spec) || *p || (ctlrno >= MaxEther))
			error(Ebadarg);
	}
	if(etherxx[ctlrno] == 0)
		error(Enodev);

	chan = devattach('l', spec);
	if(waserror()){
		chanfree(chan);
		nexterror();
	}
	chan->dev = ctlrno;
	athattach(etherxx[ctlrno]);
	poperror();
	return chan;
}

static Walkqid*
etherwalk(Chan *c, Chan *nc, char **name, int nname)
{
	return netifwalk(etherxx[c->dev], c, nc, name, nname);
}

static Chan*
etheropen(Chan *c, int omode)
{
	return netifopen(etherxx[c->dev], c, omode);
}

static void
ethercreate(Chan*, char*, int, ulong)
{
}

static void
etherclose(Chan *c)
{
	netifclose(etherxx[c->dev], c);
}

static long
etherread(Chan *chan, void *buf, long n, vlong off)
{
	Ether *ether;
	ulong offset = off;

	ether = etherxx[chan->dev];
	if((chan->qid.type & QTDIR) == 0 && ether->ifstat){
		/*
		 * With some controllers it is necessary to reach
		 * into the chip to extract statistics.
		 */
		if(NETTYPE(chan->qid.path) == Nifstatqid)
			return ether->ifstat(ether, buf, n, offset);
		else if(NETTYPE(chan->qid.path) == Nstatqid)
			ether->ifstat(ether, buf, 0, offset);
	}

	return netifread(ether, chan, buf, n, offset);
}

static Block*
etherbread(Chan *c, long n, ulong offset)
{
	return netifbread(etherxx[c->dev], c, n, offset);
}

/* kick the transmitter to drain the output ring */
static void
athtransmit(Ether* ether)
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	ilock(ctlr);
	ctlr->pktstosend = 1;
	wakeup(&ctlr->trendez);
	iunlock(ctlr);
}

static long (*athctl)(Ether *, char *, int) = nil;

static int
etheroq(Ether* ether, Block* bp)
{
	int len, loopback, s;
	Etherpkt *pkt;

	ether->outpackets++;

	/*
	 * Check if the packet has to be placed back onto the input queue,
	 * i.e. if it's a loopback or broadcast packet or the interface is
	 * in promiscuous mode.
	 * If it's a loopback packet indicate to etheriq that the data isn't
	 * needed and return, etheriq will pass-on or free the block.
	 * To enable bridging to work, only packets that were originated
	 * by this interface are fed back.
	 */
	pkt = (Etherpkt*)bp->rp;
	len = BLEN(bp);
	loopback = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
	if(loopback || memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) == 0 || ether->prom){
		s = splhi();
		etheriq(ether, bp, 0);
		splx(s);
	}

	if(!loopback){
		if(qfull(ether->oq))
			print("etheroq: WARNING: ether->oq full!\n");
		qbwrite(ether->oq, bp);
		athtransmit(ether);
	} else
		freeb(bp);

	return len;
}

static long
etherwrite(Chan* chan, void* buf, long n, vlong)
{
	Ether *ether;
	Block *bp;
	int nn, onoff;
	Cmdbuf *cb;

	ether = etherxx[chan->dev];
	if(NETTYPE(chan->qid.path) != Ndataqid) {
		nn = netifwrite(ether, chan, buf, n);
		if(nn >= 0)
			return nn;
		cb = parsecmd(buf, n);
		if(cb->f[0] && strcmp(cb->f[0], "nonblocking") == 0){
			if(cb->nf <= 1)
				onoff = 1;
			else
				onoff = atoi(cb->f[1]);
			qnoblock(ether->oq, onoff);
			free(cb);
			return n;
		}
		free(cb);
		if(athctl != nil)
			return athctl(ether, buf, n);
		error(Ebadctl);
	}

	assert(ether->ctlr != nil);
	if(n > ether->mtu)
		error(Etoobig);
	if(n < ether->minmtu)
		error(Etoosmall);

	bp = allocb(n);
	if(waserror()){
		freeb(bp);
		nexterror();
	}
	memmove(bp->rp, buf, n);
	memmove(bp->rp+Eaddrlen, ether->ea, Eaddrlen);
	poperror();
	bp->wp += n;

	return etheroq(ether, bp);
}

static long
etherbwrite(Chan *c, Block *bp, ulong offset)
{
	return devbwrite(c, bp, offset);
}

static int
etherstat(Chan *c, uchar *dp, int n)
{
	return netifstat(etherxx[c->dev], c, dp, n);
}

static int
etherwstat(Chan *c, uchar *dp, int n)
{
	return netifwstat(etherxx[c->dev], c, dp, n);
}

Dev etherdevtab = {
	'l',
	"ether",

	etherreset,
	devinit,
	ethershutdown,
	etherattach,
	etherwalk,
	etherstat,
	etheropen,
	ethercreate,
	etherclose,
	etherread,
	etherbread,
	etherwrite,
	etherbwrite,
	devremove,
	etherwstat,
	devpower,
	devconfig,
};