#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/syslog.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <net/if.h>
#include <net/route.h>

#include <9fs/bitstring.h>
#include <9fs/9p.h>
#include <9fs/9auth.h>
#include <9fs/9fs.h>

static int u9fs_reply __P((struct u9fsreq * req));
static int u9fs_send __P((struct socket * so, struct mbuf * mreq, struct u9fsreq * req));
static int u9fs_receive __P((struct socket * so, struct mbuf **mrep, struct u9fsreq * req));

static int u9fs_sndlock __P((int *flagp, int *statep, struct u9fsreq *rep));
static void u9fs_sndunlock __P((int *flagp, int *statep));
static int u9fs_rcvlock __P((struct u9fsreq *req));
static void u9fs_rcvunlock __P((int *flagp, int *statep));

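/*
 * Create a socket of the requested type and connect it to the given
 * address, waiting (with a timeout) for the connection to complete so
 * that interruptible mounts are not hung here forever.
 */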
int
u9fs_connect(struct socket ** sop, struct sockaddr * saddr, int sotype, int soproto, struct proc * p)
{
  register struct socket * so;
  int error, s;

  *sop = 0;
  error = socreate(saddr->sa_family, sop, sotype, soproto, p);
  if( error )
    return error;
  so = *sop;
  error = soconnect(so, saddr, p);
  if( error )
    return error;

  /*
   * Wait for the connection to complete. Cribbed from the
   * connect system call but with the wait timing out so
   * that interruptible mounts don't hang here for a long time.
   */
  s = splnet();
  while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
    (void) tsleep((caddr_t)&so->so_timeo, PSOCK,
                  "u9fscon", 2 * hz);

  if (so->so_error) {
    error = so->so_error;
    so->so_error = 0;
    splx(s);
    return error;
  }
  splx(s);

  return (0);
}

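/*
 * Connect to the authentication server named in the mount arguments.
 * On failure the partially set up socket is torn down again.
 */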
int u9fs_connect_9auth(struct u9fsmount * nmp, struct u9fs_args * argp, struct socket ** sop)
{
  int error;
  struct proc * p = & proc0;
  struct sockaddr *nam;

  error = getsockaddr(&nam, (caddr_t)argp->authaddr, argp->authaddrlen);
  if( error )
    return error;
  error = u9fs_connect(sop, nam, argp->authsotype,
                       argp->authsoproto, p);
  if( error == 0 )
    return 0;

  u9fs_disconnect(*sop);
  *sop = 0;
  return error;
}

/*
 * Initialize sockets and congestion for a new U9FS connection.
 * We do not free the sockaddr on error.
 */
int
u9fs_connect_9fs(nmp)
     register struct u9fsmount *nmp;
{
  register struct socket *so;
  int error, rcvreserve, sndreserve;
  struct proc *p = &proc0; /* only used for socreate and sobind */

  error = u9fs_connect(&nmp->nm_so, nmp->nm_nam, nmp->nm_sotype,
                       nmp->nm_soproto, p);
  if (error)
    goto bad;
  so = nmp->nm_so;
  nmp->nm_soflags = so->so_proto->pr_flags;

  if (nmp->nm_flag & (U9FSMNT_SOFT | U9FSMNT_INT)) {
    so->so_rcv.sb_timeo = (5 * hz);
    so->so_snd.sb_timeo = (5 * hz);
  } else {
    so->so_rcv.sb_timeo = 0;
    so->so_snd.sb_timeo = 0;
  }

  /* XXX: I don't understand this -- room for only one outstanding request? */
  if (nmp->nm_sotype == SOCK_SEQPACKET) {
    sndreserve = (nmp->nm_wsize) * 2;
    rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize)) * 2;
  } else {
    if (nmp->nm_sotype != SOCK_STREAM)
      panic("u9fscon sotype");
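    /* Enable keepalives on connection-oriented transports so a dead server is eventually noticed. */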
    if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
      struct sockopt sopt;
      int val;

      bzero(&sopt, sizeof sopt);
      sopt.sopt_level = SOL_SOCKET;
      sopt.sopt_name = SO_KEEPALIVE;
      sopt.sopt_val = &val;
      sopt.sopt_valsize = sizeof val;
      val = 1;
      sosetopt(so, &sopt);
    }
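    /* Disable the Nagle algorithm so small 9P messages are not delayed. */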
    if (so->so_proto->pr_protocol == IPPROTO_TCP) {
      struct sockopt sopt;
      int val;

      bzero(&sopt, sizeof sopt);
      sopt.sopt_level = IPPROTO_TCP;
      sopt.sopt_name = TCP_NODELAY;
      sopt.sopt_val = &val;
      sopt.sopt_valsize = sizeof val;
      val = 1;
      sosetopt(so, &sopt);
    }
    sndreserve = (nmp->nm_wsize) * 2;
    rcvreserve = (nmp->nm_rsize) * 2;
  }
  error = soreserve(so, sndreserve, rcvreserve);
  if (error)
    goto bad;
  so->so_rcv.sb_flags |= SB_NOINTR;
  so->so_snd.sb_flags |= SB_NOINTR;

  /* Initialize congestion state. */
  nmp->nm_sent = 0;
  return (0);

bad:
  u9fs_disconnect(nmp->nm_so);
  nmp->nm_so = 0;
  return (error);
}

/*
 * U9FS disconnect. Clean up and unlink.
 */
void
u9fs_disconnect(struct socket * so)
{
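    /* Shut down both the read and write sides (2 == SHUT_RDWR) before closing. */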
    soshutdown(so, 2);
    soclose(so);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with u9fs requests
 * in progress when a reconnect is necessary.
 */
static int
u9fs_sndlock(flagp, statep, rep)
        register int *flagp;
        register int *statep;
        struct u9fsreq *rep;
{
        struct proc *p;
        int slpflag = 0, slptimeo = 0;

        if (rep) {
                p = rep->r_procp;
                if (rep->r_nmp->nm_flag & U9FSMNT_INT)
                        slpflag = PCATCH;
        } else
                p = (struct proc *)0;
        while (*statep & U9FSSTA_SNDLOCK) {
                if (rep && u9fs_sigintr(rep->r_nmp, p))
                        return (EINTR);
                *statep |= U9FSSTA_WANTSND;
                (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1),
                        "u9fsndlck", slptimeo);
                if (slpflag == PCATCH) {
                        slpflag = 0;
                        slptimeo = 2 * hz;
                }
        }
        *statep |= U9FSSTA_SNDLOCK;
        return (0);
}

/*
 * Unlock the stream socket for others.
 */
static void
u9fs_sndunlock(flagp, statep)
        register int *flagp;
        register int *statep;
{

        if ((*statep & U9FSSTA_SNDLOCK) == 0)
                panic("u9fs sndunlock");
        *statep &= ~U9FSSTA_SNDLOCK;
        if (*statep & U9FSSTA_WANTSND) {
                *statep &= ~U9FSSTA_WANTSND;
                wakeup((caddr_t)flagp);
        }
}

/*
 * Test for a termination condition pending on the process.
 * This is used for U9FSMNT_INT mounts.
 */
int
u9fs_sigintr(nmp, p)
        struct u9fsmount *nmp;
        struct proc * p;
{
        if (!(nmp->nm_flag & U9FSMNT_INT))
                return (0);
        if (p && p->p_siglist &&
            (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
            U9FSINT_SIGMASK))
                return (EINTR);
        return (0);
}

/*
 * This is the u9fs send routine. For connection based socket types, it
 * must be called with a u9fs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
static int
u9fs_send(so, top, req)
        register struct socket *so;
        register struct mbuf *top;
        struct u9fsreq *req;
{
  int error, soflags, flags;

  soflags = so->so_proto->pr_flags;
  if (so->so_type == SOCK_SEQPACKET)
    flags = MSG_EOR;
  else
    flags = 0;

  error = so->so_proto->pr_usrreqs->pru_sosend(so, 0, 0, top, 0,
                                               flags, req->r_procp);
  if (error)
    log(LOG_INFO, "u9fs send error %d for server %s\n", error,
        req->r_nmp->nm_mountp->mnt_stat.f_mntfromname);

  return (error);
}

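/*
 * Pull one complete 9P message off the socket into an mbuf chain.
 * Only the SOCK_SEQPACKET case is implemented; the record boundary
 * delimits the message, so a single soreceive() pass suffices.
 */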
static int
u9fs_receive(so, mrep, req)
     register struct socket * so;
     struct mbuf **mrep;
     struct u9fsreq * req;
{
  struct uio auio;
  u_int32_t len;
  int error = 0, sotype, rcvflg;

  /*
   * Set up arguments for soreceive()
   */
  *mrep = (struct mbuf *)0;
  sotype = req->r_nmp->nm_sotype;

  /*
   * For reliable protocols, lock against other senders/receivers
   * in case a reconnect is necessary.
   * For SOCK_STREAM, first get the Record Mark to find out how much
   * more there is to get.
   * We must lock the socket against other receivers
   * until we have an entire rpc request/reply.
   */
  if (sotype == SOCK_SEQPACKET) {
    if( (so->so_state & SS_ISCONNECTED) == 0 )
      return (EACCES);
    auio.uio_resid = len = 1000000;
    auio.uio_procp = req->r_procp;
    do {
      rcvflg = 0;
      error = so->so_proto->pr_usrreqs->pru_soreceive
        (so, 0, &auio, mrep,
         (struct mbuf **)0, &rcvflg);
    } while (error == EWOULDBLOCK);
    len -= auio.uio_resid;
  }
  if (error) {
    m_freem(*mrep);
    *mrep = (struct mbuf *)0;
  }
  return (error);
}

static int
u9fs_rcvlock(req)
        register struct u9fsreq *req;
{
        register int *flagp = &req->r_nmp->nm_flag;
        register int *statep = &req->r_nmp->nm_state;
        int slpflag, slptimeo = 0;

        if (*flagp & U9FSMNT_INT)
                slpflag = PCATCH;
        else
                slpflag = 0;
        while (*statep & U9FSSTA_RCVLOCK) {
                if (u9fs_sigintr(req->r_nmp, req->r_procp))
                        return (EINTR);
                *statep |= U9FSSTA_WANTRCV;
                (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "u9fsrcvlk",
                        slptimeo);
                /*
                 * If our reply was received while we were sleeping,
                 * then just return without taking the lock to avoid a
                 * situation where a single iod could 'capture' the
                 * receive lock.
                 */
                if (req->r_mrep != NULL)
                        return (EALREADY);
                if (slpflag == PCATCH) {
                        slpflag = 0;
                        slptimeo = 2 * hz;
                }
        }
        *statep |= U9FSSTA_RCVLOCK;
        return (0);
}

/*
 * Unlock the stream socket for others.
 */
static void
u9fs_rcvunlock(flagp, statep)
        register int *flagp;
        register int *statep;
{

        if ((*statep & U9FSSTA_RCVLOCK) == 0)
                panic("u9fs rcvunlock");
        *statep &= ~U9FSSTA_RCVLOCK;
        if (*statep & U9FSSTA_WANTRCV) {
                *statep &= ~U9FSSTA_WANTRCV;
                wakeup((caddr_t)flagp);
        }
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the tag, until ours is found.
 */
/* ARGSUSED */
static int
u9fs_reply(struct u9fsreq * req)
{
  int error;
  struct mbuf * mrep;
  register struct u9fsmount *nmp = req->r_nmp;
  u_short tag;
  struct u9fsreq * qp;

  /*
   * Loop around until we get our own reply
   */
  for (;;) {
    /*
     * Lock against other receivers so that I don't get stuck in
     * sbwait() after someone else has received my reply for me.
     * Also necessary for connection based protocols to avoid
     * race conditions during a reconnect.
     * If u9fs_rcvlock() returns EALREADY, that means that
     * the reply has already been received by another
     * process and we can return immediately.  In this
     * case, the lock is not taken to avoid races with
     * other processes.
     */
    error = u9fs_rcvlock(req);
    if (error == EALREADY)
      return (0);
    if (error)
      return (error);
    /*
     * Get the next 9P reply off the socket
     */
    error = u9fs_receive(nmp->nm_so, &mrep, req);
    u9fs_rcvunlock(&nmp->nm_flag, &nmp->nm_state);
    if (error)
      return (error);

    /* extract the tag */
    tag = u9p_m_tag(&mrep);

    /*
     * Loop through the request list to match up the reply.
     * If there is no match, just drop the datagram.
     */
    for (qp = nmp->nm_reqq.tqh_first; qp != 0; qp = qp->r_chain.tqe_next) {
      if ( qp->r_mrep == 0 && qp->r_tag == tag )
        break;
    }
    if( qp == 0 ) {
      m_freem(mrep);
      continue;
    }

    if( u9p_m_m2s(&mrep, qp->r_rep) ) { /* freed by m2s */
      continue;
    }

    qp->r_mrep = mrep;  /* should not be freed until the reply is read */

    if( qp == req )
      return 0;
  }
}

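/*
 * Issue a 9P request and wait for the matching reply.
 * The request is tagged, queued on the mount's outstanding-request
 * list, sent, and removed again once u9fs_reply() has matched the
 * answer.  If relm is set, the reply mbuf chain is released here.
 */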
int u9fs_request(struct u9fsreq * req, struct u9fsreq * rep, int relm)
{
  struct mbuf * mreq;
  int error, s;
  struct u9fsmount * nmp;

  req->r_rep = rep;
  req->r_mrep = 0;
  nmp = req->r_nmp;
  req->r_tag = u9fs_id_new(nmp->nm_tags);

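  /* Marshal the request structure into an mbuf chain for transmission. */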
  mreq = u9p_m_s2m(req);

  /*
   * Chain request into list of outstanding requests. Be sure
   * to put it LAST so timer finds oldest requests first.
   */
  s = splsoftclock();
  TAILQ_INSERT_TAIL(&nmp->nm_reqq, req, r_chain);
  splx(s);

  error = u9fs_send(nmp->nm_so, mreq, req);

  if( !error )
    error = u9fs_reply(req);

  /*
   * RPC done, unlink the request.
   */
  s = splsoftclock();
  TAILQ_REMOVE(&nmp->nm_reqq, req, r_chain);
  splx(s);

  u9fs_id_free(nmp->nm_tags, req->r_tag);

  if( !error && relm ) {
    m_freem(req->r_mrep);
    req->r_mrep = 0;
  }
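  /* The server signalled failure with an Rerror reply; the error string is not decoded, so report a generic EACCES. */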
  if( rep->r_type == Rerror )
    error = EACCES;

  return error;
}