#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <net/if.h>
#include <net/route.h>
#include <9fs/bitstring.h>
#include <9fs/9p.h>
#include <9fs/9auth.h>
#include <9fs/9fs.h>
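/*
 * Buffer cache (bio) I/O for the u9fs 9P filesystem: cached read and
 * write vnode ops, the RPC-backed I/O routine behind them, and buffer
 * flush/invalidate support.  The structure (and some of the comments
 * below) apparently follow the FreeBSD NFS bio code, nfs_bio.c.
 */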
static struct buf *u9fs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size, struct proc *p));
static void u9fs_prot_buf __P((struct buf *bp, int off, int n));
/*
* Vnode op for read using bio
*/
int
u9fs_bioread(vp, uio, ioflag, cred, getpages)
register struct vnode *vp;
register struct uio *uio;
int ioflag;
struct ucred *cred;
int getpages;
{
register struct u9fsnode *np = VTOU9FS(vp);
register int biosize;
off_t diff;
struct buf *bp = 0;
struct proc *p;
struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
daddr_t lbn;
int error = 0, n = 0, on = 0, bufsize, not_readin;
if (uio->uio_resid == 0)
return (0);
if (uio->uio_offset < 0)
return (EINVAL);
p = uio->uio_procp;
if (vp->v_type != VDIR &&
(uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
return (EFBIG);
biosize = vp->v_mount->mnt_stat.f_iosize;
#if 0
if( np->n_qid.vers ) { /* in cache, check revision */
error = VOP_GETATTR(vp, &vattr, cred, p);
if( error )
return error;
if( np->n_qid.vers != np->n_dir.dir_qid.vers ) {
/* content changed */
u9fs_vinvalbuf(vp, V_SAVE, cred, p, 1);
}
}
#endif
do {
switch (vp->v_type) {
case VREG:
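/*
 * Split the offset into a logical block number and an offset within
 * that block.  The mask for "on" assumes biosize is a power of two;
 * e.g. with biosize 8192, offset 12288 gives lbn 1, on 4096.
 */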
lbn = uio->uio_offset / biosize;
on = uio->uio_offset & (biosize - 1);
not_readin = 1;
#if 0
/*
* Start the read ahead(s), as required.
*/
if (u9fs_numasync > 0 && nmp->nm_readahead > 0) {
for (nra = 0; nra < nmp->nm_readahead &&
(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
rabn = lbn + 1 + nra;
if (!incore(vp, rabn)) {
rabp = u9fs_getcacheblk(vp, rabn, biosize, p);
if (!rabp)
return (EINTR);
if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (u9fs_asyncio(rabp, cred)) {
rabp->b_flags |= B_INVAL|B_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
}
} else
brelse(rabp);
}
}
}
#endif
/*
* If the block is in the cache and has the required data
* in a valid region, just copy it out.
* Otherwise, get the block and write back/read in,
* as required.
*/
again:
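/*
 * For the last block of the file, shrink the buffer to the remaining
 * bytes, rounded up to a DEV_BSIZE boundary.  E.g. with biosize 8192,
 * DEV_BSIZE 512 and n_size 10000, block 1 gets
 * bufsize = 10000 - 8192 = 1808, rounded up to 2048.
 */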
bufsize = biosize;
if ((off_t)(lbn + 1) * biosize > np->n_size &&
(off_t)(lbn + 1) * biosize - np->n_size < biosize) {
bufsize = np->n_size - (off_t)lbn * biosize;
bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
}
bp = u9fs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
/*
* If we are being called from u9fs_getpages, we must
* make sure the buffer is a vmio buffer. The vp will
* already be setup for vmio but there may be some old
* non-vmio buffers attached to it.
*/
if (getpages && !(bp->b_flags & B_VMIO)) {
#ifdef DIAGNOSTIC
printf("u9fs_bioread: non vmio buf found, discarding\n");
#endif
bp->b_flags |= B_NOCACHE;
if (bp->b_dirtyend > 0) {
if ((bp->b_flags & B_DELWRI) == 0)
panic("u9fsbioread");
if (VOP_BWRITE(bp) == EINTR)
return (EINTR);
} else
brelse(bp);
goto again;
}
if ((bp->b_flags & B_CACHE) == 0) {
bp->b_flags |= B_READ;
bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
not_readin = 0;
vfs_busy_pages(bp, 0);
error = u9fs_doio(bp, cred, p);
if (error) {
brelse(bp);
return (error);
}
np->n_qid.vers = np->n_dir.dir_qid.vers;
}
if (bufsize > on) {
n = min((unsigned)(bufsize - on), uio->uio_resid);
} else {
n = 0;
}
diff = np->n_size - uio->uio_offset;
if (diff < n)
n = diff;
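/*
 * The buffer came from the cache without a fresh read.  If the bytes
 * we need fall outside its valid region, discard it (flushing any
 * delayed write first) and start over.
 */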
if (not_readin && n > 0) {
if (on < bp->b_validoff || (on + n) > bp->b_validend) {
bp->b_flags |= B_NOCACHE;
if (bp->b_dirtyend > 0) {
if ((bp->b_flags & B_DELWRI) == 0)
panic("u9fsbioread");
if (VOP_BWRITE(bp) == EINTR)
return (EINTR);
} else
brelse(bp);
goto again;
}
}
vp->v_lastr = lbn;
diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
if (diff < n)
n = diff;
break;
case VDIR:
biosize = nmp->nm_readdirsize;
lbn = (uoff_t)uio->uio_offset / biosize;
on = uio->uio_offset % biosize;
bp = u9fs_getcacheblk(vp, lbn, biosize, p);
if (!bp)
return (EINTR);
if ((bp->b_flags & B_CACHE) == 0) {
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
error = u9fs_doio(bp, cred, p);
if (error) {
brelse(bp);
return (error);
}
np->n_qid.vers = np->n_dir.dir_qid.vers;
}
/*
* Make sure we use a signed variant of min() since
* the second term may be negative.
*/
n = lmin(uio->uio_resid, biosize - bp->b_resid - on);
break;
default:
printf(" u9fs_bioread: type %x unexpected\n",vp->v_type);
break;
};
if (n > 0) {
error = uiomove(bp->b_data + on, (int)n, uio);
}
brelse(bp);
} while (error == 0 && uio->uio_resid > 0 && n > 0);
return (error);
}
/*
* Vnode op for write using bio
*/
int
u9fs_biowrite(vp, uio, ioflag, cred)
register struct vnode *vp;
register struct uio *uio;
register int ioflag;
register struct ucred *cred;
{
register int biosize;
struct proc *p = uio->uio_procp;
struct u9fsnode *np = VTOU9FS(vp);
struct buf *bp;
struct vattr vattr;
struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
daddr_t lbn;
int bufsize;
int n, on, error = 0;
if (ioflag & IO_APPEND) {
error = VOP_GETATTR(vp, &vattr, cred, p);
if (error)
return (error);
uio->uio_offset = np->n_size;
}
if (uio->uio_offset < 0)
return (EINVAL);
if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
return (EFBIG);
if (uio->uio_resid == 0)
return (0);
/*
 * Use a single block size (f_iosize) so that all buffer cache blocks
 * within a filesystem are the same size; the write RPCs themselves
 * are sized separately (cf. the equivalent NFS code, which uses
 * nm_rsize here and nm_wsize in nfs_writerpc).
 */
biosize = vp->v_mount->mnt_stat.f_iosize;
do {
lbn = uio->uio_offset / biosize;
on = uio->uio_offset & (biosize-1);
n = min((unsigned)(biosize - on), uio->uio_resid);
if (uio->uio_offset + n > np->n_size) {
np->n_size = uio->uio_offset + n;
vnode_pager_setsize(vp, np->n_size);
}
bufsize = biosize;
if ((off_t)(lbn + 1) * biosize > np->n_size) {
bufsize = np->n_size - (off_t)lbn * biosize;
bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
}
bp = u9fs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
crhold(cred);
bp->b_wcred = cred;
}
error = uiomove((char *)bp->b_data + on, n, uio);
if (error) {
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
/*
* This will keep the buffer and mmaped regions more coherent.
*/
u9fs_prot_buf(bp, on, n);
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
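/*
 * Fold the freshly dirtied range into the buffer's valid range.
 * A single (validoff, validend) window cannot describe two disjoint
 * regions, so if they do not overlap the old valid data is abandoned
 * (see the XXX below).
 */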
if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
bp->b_validoff > bp->b_dirtyend) {
/* XXX: destroys our read cache if the regions do not overlap */
/* two choices, neither implemented:
1> keep the bigger (or smaller) piece
2> read in the missing segment
*/
bp->b_validoff = bp->b_dirtyoff;
bp->b_validend = bp->b_dirtyend;
} else {
bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
}
error = bwrite(bp);
if (error) {
bp->b_flags |= B_ERROR;
/* no brelse() here: bwrite() releases the buffer itself */
return (error);
}
} while (uio->uio_resid > 0 && n > 0);
return (0);
}
/*
* Do an I/O operation to/from a cache block. This may be called
* synchronously or from an u9fsiod.
*/
int
u9fs_doio(bp, cr, p)
register struct buf *bp;
struct ucred *cr;
struct proc *p;
{
register struct uio *uiop;
register struct vnode *vp;
struct u9fsnode *np;
struct u9fsmount *nmp;
int error = 0, diff, len;
struct uio uio;
struct iovec io;
vp = bp->b_vp;
np = VTOU9FS(vp);
nmp = VFSTOU9FS(vp->v_mount);
uiop = &uio;
uiop->uio_iov = &io;
uiop->uio_iovcnt = 1;
uiop->uio_segflg = UIO_SYSSPACE;
uiop->uio_procp = p;
if (bp->b_flags & B_READ) {
io.iov_len = uiop->uio_resid = bp->b_bcount;
io.iov_base = bp->b_data;
uiop->uio_rw = UIO_READ;
switch (vp->v_type) {
case VREG:
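/*
 * u9fs_getcacheblk() scaled b_blkno into DEV_BSIZE units, so the
 * byte offset of this block is simply b_blkno * DEV_BSIZE.
 */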
uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
error = u9fs_readrpc(vp, uiop, cr);
if (!error) {
bp->b_validoff = 0;
if (uiop->uio_resid) {
/*
* If len > 0, there is a hole in the file and
* no writes after the hole have been pushed to
* the server yet.
* Just zero fill the rest of the valid area.
*/
diff = bp->b_bcount - uiop->uio_resid;
len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
+ diff);
if (len > 0) {
len = min(len, uiop->uio_resid);
bzero((char *)bp->b_data + diff, len);
bp->b_validend = diff + len;
} else
bp->b_validend = diff;
} else
bp->b_validend = bp->b_bcount;
}
break;
case VDIR:
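/*
 * Directory buffers are addressed by b_lblkno in units of
 * nm_readdirsize, mirroring the lbn computation in the VDIR case
 * of u9fs_bioread().
 */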
uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * nmp->nm_readdirsize;
error = u9fs_readdirrpc(vp, uiop, cr);
if (error == 0 && uiop->uio_resid == bp->b_bcount)
bp->b_flags |= B_INVAL;
break;
default:
printf("u9fs_doio: type %x unexpected\n",vp->v_type);
break;
};
if (error) {
bp->b_flags |= B_ERROR;
bp->b_error = error;
}
} else {
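/*
 * Writing: clip the dirty region to the current end of file, then
 * push the remaining dirty bytes to the server in a single
 * u9fs_writerpc() call.
 */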
if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
if (bp->b_dirtyend > bp->b_dirtyoff) {
io.iov_len = uiop->uio_resid = bp->b_dirtyend
- bp->b_dirtyoff;
uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
+ bp->b_dirtyoff;
io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
uiop->uio_rw = UIO_WRITE;
bp->b_flags |= B_WRITEINPROG;
error = u9fs_writerpc(vp, uiop, cr);
bp->b_flags &= ~B_WRITEINPROG;
if (error) {
bp->b_flags |= B_ERROR;
bp->b_error = np->n_error = error;
np->n_flag |= NWRITEERR;
}
bp->b_dirtyoff = bp->b_dirtyend = 0;
} else {
bp->b_resid = 0;
biodone(bp);
return (0);
}
}
bp->b_resid = uiop->uio_resid;
biodone(bp);
return (error);
}
/*
* Get an u9fs cache block.
* Allocate a new one if the block isn't currently in the cache
* and return the block marked busy. If the calling process is
* interrupted by a signal for an interruptible mount point, return
* NULL.
*/
static struct buf *
u9fs_getcacheblk(vp, bn, size, p)
struct vnode *vp;
daddr_t bn;
int size;
struct proc *p;
{
register struct buf *bp;
struct mount *mp;
struct u9fsmount *nmp;
mp = vp->v_mount;
nmp = VFSTOU9FS(mp);
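/*
 * On interruptible mounts let the initial getblk() sleep be broken
 * by signals (PCATCH); if it returns NULL, either bail out on a
 * pending signal or retry with a 2 second timeout so signals are
 * re-checked periodically.
 */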
if (nmp->nm_flag & U9FSMNT_INT) {
bp = getblk(vp, bn, size, PCATCH, 0);
while (bp == (struct buf *)0) {
if (u9fs_sigintr(nmp, p))
return ((struct buf *)0);
bp = getblk(vp, bn, size, 0, 2 * hz);
}
} else
bp = getblk(vp, bn, size, 0, 0);
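/*
 * For regular files express b_blkno in DEV_BSIZE units, so that
 * u9fs_doio() can turn it straight back into a byte offset.
 */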
if (vp->v_type == VREG) {
int biosize;
biosize = mp->mnt_stat.f_iosize;
bp->b_blkno = bn * (biosize / DEV_BSIZE);
}
return (bp);
}
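/*
 * Revoke all mappings of the pages backing bytes [off, off + n) of a
 * VMIO buffer, so that subsequent mmap'ed access faults and observes
 * the newly written data.
 */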
static void
u9fs_prot_buf(bp, off, n)
struct buf *bp;
int off;
int n;
{
int pindex, boff, end;
if ((bp->b_flags & B_VMIO) == 0)
return;
end = round_page(off + n);
for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
pindex = boff >> PAGE_SHIFT;
vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
}
}
/*
* Flush and invalidate all dirty buffers. If another process is already
* doing the flush, just wait for completion.
*/
int
u9fs_vinvalbuf(vp, flags, cred, p, intrflg)
struct vnode *vp;
int flags;
struct ucred *cred;
struct proc *p;
int intrflg;
{
register struct u9fsnode *np = VTOU9FS(vp);
struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
int error = 0, slpflag, slptimeo;
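/*
 * A vnode with VXLOCK set is being reclaimed; its buffers are
 * cleaned up by the reclaim path instead, so there is nothing
 * to flush here.
 */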
if (vp->v_flag & VXLOCK) {
return (0);
}
if ((nmp->nm_flag & U9FSMNT_INT) == 0)
intrflg = 0;
if (intrflg) {
slpflag = PCATCH;
slptimeo = 2 * hz;
} else {
slpflag = 0;
slptimeo = 0;
}
/*
* First wait for any other process doing a flush to complete.
*/
while (np->n_flag & NFLUSHINPROG) {
np->n_flag |= NFLUSHWANT;
error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "u9fsvinval",
slptimeo);
if (error && intrflg && u9fs_sigintr(nmp, p))
return (EINTR);
}
/*
* Now, flush as required.
*/
np->n_flag |= NFLUSHINPROG;
error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
while (error) {
if (intrflg && u9fs_sigintr(nmp, p)) {
np->n_flag &= ~NFLUSHINPROG;
if (np->n_flag & NFLUSHWANT) {
np->n_flag &= ~NFLUSHWANT;
wakeup((caddr_t)&np->n_flag);
}
return (EINTR);
}
error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
}
np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
if (np->n_flag & NFLUSHWANT) {
np->n_flag &= ~NFLUSHWANT;
wakeup((caddr_t)&np->n_flag);
}
return (0);
}