path: root/sys/fs/nullfs
author		julian <julian@FreeBSD.org>	2001-09-12 08:38:13 +0000
committer	julian <julian@FreeBSD.org>	2001-09-12 08:38:13 +0000
commit		5596676e6c6c1e81e899cd0531f9b1c28a292669 (patch)
tree		b1a19fcdf05759281fab0d89efb13f0fdf42102e /sys/fs/nullfs
parent		83e00d4274950d2b531c24692cd123538ffbddb9 (diff)
download	FreeBSD-src-5596676e6c6c1e81e899cd0531f9b1c28a292669.zip
		FreeBSD-src-5596676e6c6c1e81e899cd0531f9b1c28a292669.tar.gz
KSE Milestone 2
Note: ALL MODULES MUST BE RECOMPILED. Make the kernel aware that there
are smaller units of scheduling than the process (but only allow one
thread per process at this time). This is functionally equivalent to the
previous -current except that there is a thread associated with each
process.

Sorry John! (your next MFC will be a doozy!)

Reviewed by:	peter@freebsd.org, dillon@freebsd.org
X-MFC after:	ha ha ha ha
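The change visible throughout this diff is mechanical: every VFS and
vnode-operation path that previously carried a struct proc * now carries
a struct thread *. A minimal before/after sketch of the pattern (an
illustrative fragment, not code from this commit):

	/* Before KSE Milestone 2: the process is handed to locking calls. */
	struct proc *p = curproc;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	VOP_UNLOCK(vp, 0, p);

	/*
	 * After: the current thread is handed in instead. With only one
	 * thread per process at this stage, behavior is unchanged.
	 */
	struct thread *td = curthread;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_UNLOCK(vp, 0, td);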
Diffstat (limited to 'sys/fs/nullfs')
-rw-r--r--	sys/fs/nullfs/null_subr.c	| 20
-rw-r--r--	sys/fs/nullfs/null_vfsops.c	| 66
-rw-r--r--	sys/fs/nullfs/null_vnops.c	| 70
3 files changed, 78 insertions(+), 78 deletions(-)
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 97aa492..add959e 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -110,7 +110,7 @@ null_node_find(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct null_node_hashhead *hd;
struct null_node *a;
struct vnode *vp;
@@ -123,17 +123,17 @@ null_node_find(mp, lowervp)
*/
hd = NULL_NHASH(lowervp);
loop:
- lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+ lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
LIST_FOREACH(a, hd, null_hash) {
if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
vp = NULLTOV(a);
- lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+ lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
/*
* We need vget for the VXLOCK
* stuff, but we don't want to lock
* the lower node.
*/
- if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, p)) {
+ if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, td)) {
printf ("null_node_find: vget failed.\n");
goto loop;
};
@@ -141,11 +141,11 @@ loop:
* Now we got both vnodes locked, so release the
* lower one.
*/
- VOP_UNLOCK(lowervp, 0, p);
+ VOP_UNLOCK(lowervp, 0, td);
return (vp);
}
}
- lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+ lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
return NULLVP;
}
@@ -162,7 +162,7 @@ null_node_alloc(mp, lowervp, vpp)
struct vnode *lowervp;
struct vnode **vpp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct null_node_hashhead *hd;
struct null_node *xp;
struct vnode *othervp, *vp;
@@ -211,16 +211,16 @@ null_node_alloc(mp, lowervp, vpp)
* NULL, then we copy that up and manually lock the new vnode.
*/
- lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+ lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
vp->v_vnlock = lowervp->v_vnlock;
- error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, p);
+ error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, td);
if (error)
panic("null_node_alloc: can't lock new vnode\n");
VREF(lowervp);
hd = NULL_NHASH(lowervp);
LIST_INSERT_HEAD(hd, xp, null_hash);
- lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+ lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
return 0;
}
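Aside from the thread argument, the null_node_find() hunks above preserve
the existing lookup-and-retry idiom: the hash lock is dropped before
vget(), and a vget() failure (the vnode was being recycled under VXLOCK)
restarts the search. Condensed to its shape under the same post-KSE
signatures; find_alias() and null_hash_search() are hypothetical names
standing in for the function and its LIST_FOREACH() scan in the real code:

	static struct vnode *
	find_alias(struct mount *mp, struct vnode *lowervp)
	{
		struct thread *td = curthread;
		struct vnode *vp;
	loop:
		lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
		vp = null_hash_search(mp, lowervp);
		lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
		if (vp != NULLVP &&
		    vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, td))
			goto loop;	/* vnode was being recycled; retry */
		return (vp);
	}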
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index f707751..1ab0aaa 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -63,33 +63,33 @@ static int nullfs_fhtovp(struct mount *mp, struct fid *fidp,
static int nullfs_checkexp(struct mount *mp, struct sockaddr *nam,
int *extflagsp, struct ucred **credanonp);
static int nullfs_mount(struct mount *mp, char *path, caddr_t data,
- struct nameidata *ndp, struct proc *p);
+ struct nameidata *ndp, struct thread *td);
static int nullfs_quotactl(struct mount *mp, int cmd, uid_t uid,
- caddr_t arg, struct proc *p);
+ caddr_t arg, struct thread *td);
static int nullfs_root(struct mount *mp, struct vnode **vpp);
-static int nullfs_start(struct mount *mp, int flags, struct proc *p);
+static int nullfs_start(struct mount *mp, int flags, struct thread *td);
static int nullfs_statfs(struct mount *mp, struct statfs *sbp,
- struct proc *p);
+ struct thread *td);
static int nullfs_sync(struct mount *mp, int waitfor,
- struct ucred *cred, struct proc *p);
-static int nullfs_unmount(struct mount *mp, int mntflags, struct proc *p);
+ struct ucred *cred, struct thread *td);
+static int nullfs_unmount(struct mount *mp, int mntflags, struct thread *td);
static int nullfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp);
static int nullfs_vptofh(struct vnode *vp, struct fid *fhp);
static int nullfs_extattrctl(struct mount *mp, int cmd,
struct vnode *filename_vp,
int namespace, const char *attrname,
- struct proc *p);
+ struct thread *td);
/*
* Mount null layer
*/
static int
-nullfs_mount(mp, path, data, ndp, p)
+nullfs_mount(mp, path, data, ndp, td)
struct mount *mp;
char *path;
caddr_t data;
struct nameidata *ndp;
- struct proc *p;
+ struct thread *td;
{
int error = 0;
struct null_args args;
@@ -106,7 +106,7 @@ nullfs_mount(mp, path, data, ndp, p)
*/
if (mp->mnt_flag & MNT_UPDATE) {
return (EOPNOTSUPP);
- /* return VFS_MOUNT(MOUNTTONULLMOUNT(mp)->nullm_vfs, path, data, ndp, p);*/
+ /* return VFS_MOUNT(MOUNTTONULLMOUNT(mp)->nullm_vfs, path, data, ndp, td);*/
}
/*
@@ -122,20 +122,20 @@ nullfs_mount(mp, path, data, ndp, p)
*/
if ((mp->mnt_vnodecovered->v_op == null_vnodeop_p) &&
VOP_ISLOCKED(mp->mnt_vnodecovered, NULL)) {
- VOP_UNLOCK(mp->mnt_vnodecovered, 0, p);
+ VOP_UNLOCK(mp->mnt_vnodecovered, 0, td);
isvnunlocked = 1;
}
/*
* Find lower node
*/
NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF,
- UIO_USERSPACE, args.target, p);
+ UIO_USERSPACE, args.target, td);
error = namei(ndp);
/*
* Re-lock vnode.
*/
if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, NULL))
- vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY, td);
if (error)
return (error);
@@ -174,7 +174,7 @@ nullfs_mount(mp, path, data, ndp, p)
/*
* Unlock the node (either the lower or the alias)
*/
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
/*
* Make sure the node alias worked
*/
@@ -199,7 +199,7 @@ nullfs_mount(mp, path, data, ndp, p)
(void) copyinstr(args.target, mp->mnt_stat.f_mntfromname,
MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
- (void)nullfs_statfs(mp, &mp->mnt_stat, p);
+ (void)nullfs_statfs(mp, &mp->mnt_stat, td);
NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
return (0);
@@ -211,23 +211,23 @@ nullfs_mount(mp, path, data, ndp, p)
* when that filesystem was mounted.
*/
static int
-nullfs_start(mp, flags, p)
+nullfs_start(mp, flags, td)
struct mount *mp;
int flags;
- struct proc *p;
+ struct thread *td;
{
return (0);
- /* return VFS_START(MOUNTTONULLMOUNT(mp)->nullm_vfs, flags, p); */
+ /* return VFS_START(MOUNTTONULLMOUNT(mp)->nullm_vfs, flags, td); */
}
/*
* Free reference to null layer
*/
static int
-nullfs_unmount(mp, mntflags, p)
+nullfs_unmount(mp, mntflags, td)
struct mount *mp;
int mntflags;
- struct proc *p;
+ struct thread *td;
{
void *mntdata;
int error;
@@ -257,7 +257,7 @@ nullfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct vnode *vp;
NULLFSDEBUG("nullfs_root(mp = %p, vp = %p->%p)\n", (void *)mp,
@@ -277,27 +277,27 @@ nullfs_root(mp, vpp)
return (EDEADLK);
}
#endif
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
*vpp = vp;
return 0;
}
static int
-nullfs_quotactl(mp, cmd, uid, arg, p)
+nullfs_quotactl(mp, cmd, uid, arg, td)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
- struct proc *p;
+ struct thread *td;
{
- return VFS_QUOTACTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, uid, arg, p);
+ return VFS_QUOTACTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, uid, arg, td);
}
static int
-nullfs_statfs(mp, sbp, p)
+nullfs_statfs(mp, sbp, td)
struct mount *mp;
struct statfs *sbp;
- struct proc *p;
+ struct thread *td;
{
int error;
struct statfs mstat;
@@ -308,7 +308,7 @@ nullfs_statfs(mp, sbp, p)
bzero(&mstat, sizeof(mstat));
- error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, &mstat, p);
+ error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, &mstat, td);
if (error)
return (error);
@@ -331,11 +331,11 @@ nullfs_statfs(mp, sbp, p)
}
static int
-nullfs_sync(mp, waitfor, cred, p)
+nullfs_sync(mp, waitfor, cred, td)
struct mount *mp;
int waitfor;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
/*
* XXX - Assumes no data cached at null layer.
@@ -392,16 +392,16 @@ nullfs_vptofh(vp, fhp)
}
static int
-nullfs_extattrctl(mp, cmd, filename_vp, namespace, attrname, p)
+nullfs_extattrctl(mp, cmd, filename_vp, namespace, attrname, td)
struct mount *mp;
int cmd;
struct vnode *filename_vp;
int namespace;
const char *attrname;
- struct proc *p;
+ struct thread *td;
{
return VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, filename_vp,
- namespace, attrname, p);
+ namespace, attrname, td);
}
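Most of the nullfs VFS entry points shown above are thin pass-throughs to
the filesystem mounted underneath, so the conversion is purely a matter of
changing the trailing argument in both the signature and the forwarded
call. The general shape, with a hypothetical operation name (VFS_SOMEOP)
standing in for quotactl/statfs/extattrctl:

	static int
	nullfs_someop(struct mount *mp, int arg, struct thread *td)
	{
		/* Forward to the lower filesystem, passing the thread on. */
		return (VFS_SOMEOP(MOUNTTONULLMOUNT(mp)->nullm_vfs, arg, td));
	}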
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 0801c0b..3683ac1 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -320,7 +320,7 @@ null_bypass(ap)
*(vps_p[i]) = old_vps[i];
#if 0
if (reles & VDESC_VP0_WILLUNLOCK)
- VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curproc);
+ VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curthread);
#endif
if (reles & VDESC_VP0_WILLRELE)
vrele(*(vps_p[i]));
@@ -368,7 +368,7 @@ null_lookup(ap)
{
struct componentname *cnp = ap->a_cnp;
struct vnode *dvp = ap->a_dvp;
- struct proc *p = cnp->cn_proc;
+ struct thread *td = cnp->cn_thread;
int flags = cnp->cn_flags;
struct vnode *vp, *ldvp, *lvp;
int error;
@@ -393,7 +393,7 @@ null_lookup(ap)
* tracked by underlying filesystem.
*/
if (cnp->cn_flags & PDIRUNLOCK)
- VOP_UNLOCK(dvp, LK_THISLAYER, p);
+ VOP_UNLOCK(dvp, LK_THISLAYER, td);
if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
if (ldvp == lvp) {
*ap->a_vpp = dvp;
@@ -418,7 +418,7 @@ null_setattr(ap)
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -464,7 +464,7 @@ null_getattr(ap)
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
int error;
@@ -485,7 +485,7 @@ null_access(ap)
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -520,7 +520,7 @@ null_open(ap)
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -581,12 +581,12 @@ null_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
struct vnode *lvp;
int error;
@@ -594,7 +594,7 @@ null_lock(ap)
if (vp->v_vnlock != NULL)
return 0; /* lock is shared across layers */
error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
- &vp->v_interlock, p);
+ &vp->v_interlock, td);
return (error);
}
@@ -611,9 +611,9 @@ null_lock(ap)
NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
return(lockmgr(vp->v_vnlock,
(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
- &vp->v_interlock, p));
+ &vp->v_interlock, td));
}
- return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock, p));
+ return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td));
} else {
/*
* To prevent race conditions involving doing a lookup
@@ -625,21 +625,21 @@ null_lock(ap)
*/
lvp = NULLVPTOLOWERVP(vp);
if (lvp == NULL)
- return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, p));
+ return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
if (flags & LK_INTERLOCK) {
mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
error = VOP_LOCK(lvp,
- (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, p);
+ (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, td);
} else
- error = VOP_LOCK(lvp, flags, p);
+ error = VOP_LOCK(lvp, flags, td);
if (error)
return (error);
- error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, p);
+ error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
if (error)
- VOP_UNLOCK(lvp, 0, p);
+ VOP_UNLOCK(lvp, 0, td);
return (error);
}
}
@@ -654,12 +654,12 @@ null_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
struct vnode *lvp;
if (vp->v_vnlock != NULL) {
@@ -667,35 +667,35 @@ null_unlock(ap)
return 0; /* the lock is shared across layers */
flags &= ~LK_THISLAYER;
return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
- &vp->v_interlock, p));
+ &vp->v_interlock, td));
}
lvp = NULLVPTOLOWERVP(vp);
if (lvp == NULL)
- return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, p));
+ return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
if ((flags & LK_THISLAYER) == 0) {
if (flags & LK_INTERLOCK) {
mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
- VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, p);
+ VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, td);
} else
flags &= ~LK_THISLAYER;
- return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, p));
+ return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
}
static int
null_islocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
if (vp->v_vnlock != NULL)
- return (lockstatus(vp->v_vnlock, p));
- return (lockstatus(&vp->v_lock, p));
+ return (lockstatus(vp->v_vnlock, td));
+ return (lockstatus(&vp->v_lock, td));
}
/*
@@ -707,23 +707,23 @@ static int
null_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
struct null_node *xp = VTONULL(vp);
struct vnode *lowervp = xp->null_lowervp;
- lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+ lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
LIST_REMOVE(xp, null_hash);
- lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+ lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
xp->null_lowervp = NULLVP;
if (vp->v_vnlock != NULL) {
vp->v_vnlock = &vp->v_lock; /* we no longer share the lock */
} else
- VOP_UNLOCK(vp, LK_THISLAYER, p);
+ VOP_UNLOCK(vp, LK_THISLAYER, td);
vput(lowervp);
/*
@@ -743,7 +743,7 @@ static int
null_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -774,7 +774,7 @@ null_createvobject(ap)
struct vop_createvobject_args /* {
struct vnode *vp;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -783,7 +783,7 @@ null_createvobject(ap)
if (vp->v_type == VNON || lowervp == NULL)
return 0;
- error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_p);
+ error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
if (error)
return (error);
vp->v_flag |= VOBJBUF;
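The locking changes in null_vnops.c are the subtlest part of the diff:
when the upper and lower vnodes do not share a lock, null_lock() must lock
the lower layer first and back that lock out again if locking its own
layer fails. Condensed from the null_lock() hunk above (same kernel
interfaces, error handling reduced to the essentials):

	struct vnode *lvp = NULLVPTOLOWERVP(vp);
	int error;

	error = VOP_LOCK(lvp, flags, td);	/* lower layer first */
	if (error)
		return (error);
	error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
	if (error)
		VOP_UNLOCK(lvp, 0, td);		/* undo on failure */
	return (error);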