Diffstat (limited to 'sys/ufs/ffs')
-rw-r--r--  sys/ufs/ffs/ffs_balloc.c         6
-rw-r--r--  sys/ufs/ffs/ffs_extern.h        20
-rw-r--r--  sys/ufs/ffs/ffs_inode.c         10
-rw-r--r--  sys/ufs/ffs/ffs_snapshot.c     106
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c      194
-rw-r--r--  sys/ufs/ffs/ffs_softdep_stub.c  10
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c       144
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c          6
8 files changed, 249 insertions(+), 247 deletions(-)
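Editorial note: the hunks below apply one mechanical pattern across FFS: interfaces that took a struct proc now take a struct thread, and per-process state (credentials, p_flag) is reached through td->td_proc. The fragment that follows is a minimal sketch of that pattern, not part of the commit; the function name example_flush is hypothetical, but the calls mirror ones changed in the diff.

/*
 * Sketch of the proc -> thread conversion pattern used throughout
 * this diff (hypothetical helper, for illustration only).
 */
static int
example_flush(struct vnode *vp, struct ucred *cred)
{
	struct thread *td = curthread;		/* was: struct proc *p = curproc; */

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);	/* was: ..., p); */
	(void) VOP_FSYNC(vp, cred, MNT_WAIT, td);	/* was: ..., p); */
	VOP_UNLOCK(vp, 0, td);				/* was: ..., p); */
	return (0);
}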
diff --git a/sys/ufs/ffs/ffs_balloc.c b/sys/ufs/ffs/ffs_balloc.c
index cf57bc7..26747fc 100644
--- a/sys/ufs/ffs/ffs_balloc.c
+++ b/sys/ufs/ffs/ffs_balloc.c
@@ -72,7 +72,7 @@ ffs_balloc(struct vnode *a_vp, off_t a_startoffset, int a_size,
int deallocated, osize, nsize, num, i, error;
ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
int unwindidx = -1;
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
vp = a_vp;
ip = VTOI(vp);
@@ -350,7 +350,7 @@ fail:
* occurence. The error return from fsync is ignored as we already
* have an error to return to the user.
*/
- (void) VOP_FSYNC(vp, cred, MNT_WAIT, p);
+ (void) VOP_FSYNC(vp, cred, MNT_WAIT, td);
for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
ffs_blkfree(ip, *blkp, fs->fs_bsize);
deallocated += fs->fs_bsize;
@@ -387,6 +387,6 @@ fail:
ip->i_blocks -= btodb(deallocated);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
}
- (void) VOP_FSYNC(vp, cred, MNT_WAIT, p);
+ (void) VOP_FSYNC(vp, cred, MNT_WAIT, td);
return (error);
}
diff --git a/sys/ufs/ffs/ffs_extern.h b/sys/ufs/ffs/ffs_extern.h
index e7cdc17..49f0279 100644
--- a/sys/ufs/ffs/ffs_extern.h
+++ b/sys/ufs/ffs/ffs_extern.h
@@ -44,7 +44,7 @@ struct fs;
struct inode;
struct malloc_type;
struct mount;
-struct proc;
+struct thread;
struct sockaddr;
struct statfs;
struct ucred;
@@ -64,16 +64,16 @@ ufs_daddr_t ffs_blkpref __P((struct inode *, ufs_daddr_t, int, ufs_daddr_t *));
void ffs_clrblock __P((struct fs *, u_char *, ufs_daddr_t));
void ffs_clusteracct __P((struct fs *, struct cg *, ufs_daddr_t, int));
int ffs_fhtovp __P((struct mount *, struct fid *, struct vnode **));
-int ffs_flushfiles __P((struct mount *, int, struct proc *));
+int ffs_flushfiles __P((struct mount *, int, struct thread *));
void ffs_fragacct __P((struct fs *, int, int32_t [], int));
int ffs_freefile __P((struct inode *, ino_t, int ));
int ffs_isblock __P((struct fs *, u_char *, ufs_daddr_t));
int ffs_isfreeblock __P((struct fs *, unsigned char *, ufs_daddr_t));
-int ffs_mountfs __P((struct vnode *, struct mount *, struct proc *,
+int ffs_mountfs __P((struct vnode *, struct mount *, struct thread *,
struct malloc_type *));
int ffs_mountroot __P((void));
int ffs_mount __P((struct mount *, char *, caddr_t, struct nameidata *,
- struct proc *));
+ struct thread *));
int ffs_reallocblks __P((struct vop_reallocblks_args *));
int ffs_realloccg __P((struct inode *,
ufs_daddr_t, ufs_daddr_t, int, int, struct ucred *, struct buf **));
@@ -83,10 +83,10 @@ void ffs_snapremove __P((struct vnode *vp));
int ffs_snapshot __P((struct mount *mp, char *snapfile));
void ffs_snapshot_mount __P((struct mount *mp));
void ffs_snapshot_unmount __P((struct mount *mp));
-int ffs_statfs __P((struct mount *, struct statfs *, struct proc *));
-int ffs_sync __P((struct mount *, int, struct ucred *, struct proc *));
-int ffs_truncate __P((struct vnode *, off_t, int, struct ucred *, struct proc *));
-int ffs_unmount __P((struct mount *, int, struct proc *));
+int ffs_statfs __P((struct mount *, struct statfs *, struct thread *));
+int ffs_sync __P((struct mount *, int, struct ucred *, struct thread *));
+int ffs_truncate __P((struct vnode *, off_t, int, struct ucred *, struct thread *));
+int ffs_unmount __P((struct mount *, int, struct thread *));
int ffs_update __P((struct vnode *, int));
int ffs_valloc __P((struct vnode *, int, struct ucred *, struct vnode **));
@@ -104,8 +104,8 @@ extern vop_t **ffs_fifoop_p;
void softdep_initialize __P((void));
int softdep_mount __P((struct vnode *, struct mount *, struct fs *,
struct ucred *));
-int softdep_flushworklist __P((struct mount *, int *, struct proc *));
-int softdep_flushfiles __P((struct mount *, int, struct proc *));
+int softdep_flushworklist __P((struct mount *, int *, struct thread *));
+int softdep_flushfiles __P((struct mount *, int, struct thread *));
void softdep_update_inodeblock __P((struct inode *, struct buf *, int));
void softdep_load_inodeblock __P((struct inode *));
void softdep_freefile __P((struct vnode *, ino_t, int));
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index 3af19b5..d0cd063 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -130,12 +130,12 @@ ffs_update(vp, waitfor)
* disk blocks.
*/
int
-ffs_truncate(vp, length, flags, cred, p)
+ffs_truncate(vp, length, flags, cred, td)
struct vnode *vp;
off_t length;
int flags;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
register struct vnode *ovp = vp;
ufs_daddr_t lastblock;
@@ -191,7 +191,7 @@ ffs_truncate(vp, length, flags, cred, p)
* so that it will have no data structures left.
*/
if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
- p)) != 0)
+ td)) != 0)
return (error);
if (oip->i_flag & IN_SPACECOUNTED)
fs->fs_pendingblocks -= oip->i_blocks;
@@ -200,7 +200,7 @@ ffs_truncate(vp, length, flags, cred, p)
(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
softdep_setup_freeblocks(oip, length);
- vinvalbuf(ovp, 0, cred, p, 0, 0);
+ vinvalbuf(ovp, 0, cred, td, 0, 0);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (ffs_update(ovp, 0));
}
@@ -302,7 +302,7 @@ ffs_truncate(vp, length, flags, cred, p)
bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
oip->i_size = osize;
- error = vtruncbuf(ovp, cred, p, length, fs->fs_bsize);
+ error = vtruncbuf(ovp, cred, td, length, fs->fs_bsize);
if (error && (allerror == 0))
allerror = error;
diff --git a/sys/ufs/ffs/ffs_snapshot.c b/sys/ufs/ffs/ffs_snapshot.c
index 1cbfe28..8085103 100644
--- a/sys/ufs/ffs/ffs_snapshot.c
+++ b/sys/ufs/ffs/ffs_snapshot.c
@@ -105,7 +105,7 @@ ffs_snapshot(mp, snapfile)
void *space;
struct fs *copy_fs = NULL, *fs = VFSTOUFS(mp)->um_fs;
struct snaphead *snaphead;
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
struct inode *ip, *xp;
struct buf *bp, *nbp, *ibp, *sbp = NULL;
struct nameidata nd;
@@ -129,7 +129,7 @@ ffs_snapshot(mp, snapfile)
* Create the snapshot file.
*/
restart:
- NDINIT(&nd, CREATE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE, snapfile, p);
+ NDINIT(&nd, CREATE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE, snapfile, td);
if ((error = namei(&nd)) != 0)
return (error);
if (nd.ni_vp != NULL) {
@@ -162,7 +162,7 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(nd.ni_dvp, p, KERNCRED, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, KERNCRED, LEASE_WRITE);
error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vat);
vput(nd.ni_dvp);
if (error) {
@@ -199,7 +199,7 @@ restart:
*/
for (blkno = NDADDR; blkno < numblks; blkno += NINDIR(fs)) {
error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno),
- fs->fs_bsize, p->p_ucred, B_METAONLY, &ibp);
+ fs->fs_bsize, td->td_proc->p_ucred, B_METAONLY, &ibp);
if (error)
goto out;
bdwrite(ibp);
@@ -260,7 +260,7 @@ restart:
/*
* Ensure that the snapshot is completely on disk.
*/
- if ((error = VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p)) != 0)
+ if ((error = VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td)) != 0)
goto out;
/*
* All allocations are done, so we can now snapshot the system.
@@ -464,12 +464,12 @@ out1:
out:
mp->mnt_flag = flag;
if (error)
- (void) UFS_TRUNCATE(vp, (off_t)0, 0, NOCRED, p);
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
+ (void) UFS_TRUNCATE(vp, (off_t)0, 0, NOCRED, td);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
if (error)
vput(vp);
else
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(wrtmp);
return (error);
}
@@ -840,7 +840,7 @@ ffs_snapblkfree(freeip, bno, size)
{
struct buf *ibp, *cbp, *savedcbp = 0;
struct fs *fs = freeip->i_fs;
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
struct inode *ip;
struct vnode *vp;
ufs_daddr_t lbn, blkno;
@@ -857,12 +857,12 @@ ffs_snapblkfree(freeip, bno, size)
if (lbn < NDADDR) {
blkno = ip->i_db[lbn];
} else {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- p->p_flag |= P_COWINPROGRESS;
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ td->td_proc->p_flag |= P_COWINPROGRESS;
error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
fs->fs_bsize, KERNCRED, B_METAONLY, &ibp);
- p->p_flag &= ~P_COWINPROGRESS;
- VOP_UNLOCK(vp, 0, p);
+ td->td_proc->p_flag &= ~P_COWINPROGRESS;
+ VOP_UNLOCK(vp, 0, td);
if (error)
break;
indiroff = (lbn - NDADDR) % NINDIR(fs);
@@ -888,7 +888,7 @@ ffs_snapblkfree(freeip, bno, size)
case BLK_SNAP:
if (claimedblk)
panic("snapblkfree: inconsistent block type");
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (lbn < NDADDR) {
ip->i_db[lbn] = BLK_NOCOPY;
ip->i_flag |= IN_CHANGE | IN_UPDATE;
@@ -897,7 +897,7 @@ ffs_snapblkfree(freeip, bno, size)
BLK_NOCOPY;
bdwrite(ibp);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
continue;
/*
* A block that we map is being freed. If it has not been
@@ -921,7 +921,7 @@ ffs_snapblkfree(freeip, bno, size)
"Grabonremove: snapino", ip->i_number, lbn,
freeip->i_number);
#endif
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (lbn < NDADDR) {
ip->i_db[lbn] = bno;
} else {
@@ -930,7 +930,7 @@ ffs_snapblkfree(freeip, bno, size)
}
ip->i_blocks += btodb(size);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (1);
}
if (lbn >= NDADDR)
@@ -940,13 +940,13 @@ ffs_snapblkfree(freeip, bno, size)
* allocation will never require any additional allocations for
* the snapshot inode.
*/
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- p->p_flag |= P_COWINPROGRESS;
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ td->td_proc->p_flag |= P_COWINPROGRESS;
error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
fs->fs_bsize, KERNCRED, 0, &cbp);
- p->p_flag &= ~P_COWINPROGRESS;
+ td->td_proc->p_flag &= ~P_COWINPROGRESS;
if (error) {
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
break;
}
#ifdef DEBUG
@@ -966,8 +966,8 @@ ffs_snapblkfree(freeip, bno, size)
bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
bawrite(cbp);
if (dopersistence && ip->i_effnlink > 0)
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
continue;
}
/*
@@ -977,11 +977,11 @@ ffs_snapblkfree(freeip, bno, size)
bzero(cbp->b_data, fs->fs_bsize);
bawrite(cbp);
if (dopersistence && ip->i_effnlink > 0)
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
break;
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
savedcbp = cbp;
}
/*
@@ -993,9 +993,9 @@ ffs_snapblkfree(freeip, bno, size)
vp = savedcbp->b_vp;
bawrite(savedcbp);
if (dopersistence && VTOI(vp)->i_effnlink > 0) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
}
}
/*
@@ -1016,7 +1016,7 @@ ffs_snapshot_mount(mp)
{
struct ufsmount *ump = VFSTOUFS(mp);
struct fs *fs = ump->um_fs;
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
struct snaphead *snaphead;
struct vnode *vp;
struct inode *ip;
@@ -1052,7 +1052,7 @@ ffs_snapshot_mount(mp)
vp->v_flag |= VSYSTEM;
ump->um_devvp->v_rdev->si_copyonwrite = ffs_copyonwrite;
ump->um_devvp->v_flag |= VCOPYONWRITE;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
}
@@ -1087,7 +1087,7 @@ ffs_copyonwrite(devvp, bp)
struct buf *bp;
{
struct buf *ibp, *cbp, *savedcbp = 0;
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
struct fs *fs;
struct inode *ip;
struct vnode *vp;
@@ -1096,7 +1096,7 @@ ffs_copyonwrite(devvp, bp)
fs = TAILQ_FIRST(&devvp->v_rdev->si_snapshots)->i_fs;
lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno));
- if (p->p_flag & P_COWINPROGRESS)
+ if (td->td_proc->p_flag & P_COWINPROGRESS)
panic("ffs_copyonwrite: recursive call");
TAILQ_FOREACH(ip, &devvp->v_rdev->si_snapshots, i_nextsnap) {
vp = ITOV(ip);
@@ -1117,19 +1117,19 @@ ffs_copyonwrite(devvp, bp)
* sleep briefly, and try again.
*/
retry:
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (lbn < NDADDR) {
blkno = ip->i_db[lbn];
} else {
- p->p_flag |= P_COWINPROGRESS;
+ td->td_proc->p_flag |= P_COWINPROGRESS;
error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
fs->fs_bsize, KERNCRED, B_METAONLY | B_NOWAIT, &ibp);
- p->p_flag &= ~P_COWINPROGRESS;
+ td->td_proc->p_flag &= ~P_COWINPROGRESS;
if (error) {
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error != EWOULDBLOCK)
break;
- tsleep(vp, p->p_pri.pri_user, "nap", 1);
+ tsleep(vp, td->td_ksegrp->kg_pri.pri_user, "nap", 1);
goto retry;
}
indiroff = (lbn - NDADDR) % NINDIR(fs);
@@ -1141,7 +1141,7 @@ retry:
panic("ffs_copyonwrite: bad copy block");
#endif
if (blkno != 0) {
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
continue;
}
/*
@@ -1149,15 +1149,15 @@ retry:
* allocation will never require any additional allocations for
* the snapshot inode.
*/
- p->p_flag |= P_COWINPROGRESS;
+ td->td_proc->p_flag |= P_COWINPROGRESS;
error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
fs->fs_bsize, KERNCRED, B_NOWAIT, &cbp);
- p->p_flag &= ~P_COWINPROGRESS;
+ td->td_proc->p_flag &= ~P_COWINPROGRESS;
if (error) {
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error != EWOULDBLOCK)
break;
- tsleep(vp, p->p_pri.pri_user, "nap", 1);
+ tsleep(vp, td->td_ksegrp->kg_pri.pri_user, "nap", 1);
goto retry;
}
#ifdef DEBUG
@@ -1183,8 +1183,8 @@ retry:
bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize);
bawrite(cbp);
if (dopersistence && ip->i_effnlink > 0)
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
continue;
}
/*
@@ -1194,12 +1194,12 @@ retry:
bzero(cbp->b_data, fs->fs_bsize);
bawrite(cbp);
if (dopersistence && ip->i_effnlink > 0)
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
break;
}
savedcbp = cbp;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
/*
* Note that we need to synchronously write snapshots that
@@ -1210,9 +1210,9 @@ retry:
vp = savedcbp->b_vp;
bawrite(savedcbp);
if (dopersistence && VTOI(vp)->i_effnlink > 0) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, p);
- VOP_UNLOCK(vp, 0, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ (void) VOP_FSYNC(vp, KERNCRED, MNT_WAIT, td);
+ VOP_UNLOCK(vp, 0, td);
}
}
return (error);
@@ -1229,7 +1229,7 @@ readblock(bp, lbn)
{
struct uio auio;
struct iovec aiov;
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
struct inode *ip = VTOI(bp->b_vp);
aiov.iov_base = bp->b_data;
@@ -1240,6 +1240,6 @@ readblock(bp, lbn)
auio.uio_resid = bp->b_bcount;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
return (physio(ip->i_devvp->v_rdev, &auio, 0));
}
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 86f9111..a4a35bc 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -149,8 +149,8 @@ static struct malloc_type *memtype[] = {
static void softdep_error __P((char *, int));
static void drain_output __P((struct vnode *, int));
static int getdirtybuf __P((struct buf **, int));
-static void clear_remove __P((struct proc *));
-static void clear_inodedeps __P((struct proc *));
+static void clear_remove __P((struct thread *));
+static void clear_inodedeps __P((struct thread *));
static int flush_pagedep_deps __P((struct vnode *, struct mount *,
struct diraddhd *));
static int flush_inodedep_deps __P((struct fs *, ino_t));
@@ -239,10 +239,11 @@ static struct lockit {
#define FREE_LOCK_INTERLOCKED(lk)
#else /* DEBUG */
+#define NOHOLDER ((struct thread *)-1)
static struct lockit {
int lkt_spl;
- pid_t lkt_held;
-} lk = { 0, -1 };
+ struct thread * lkt_held;
+} lk = { 0, NOHOLDER };
static int lockcnt;
static void acquire_lock __P((struct lockit *));
@@ -259,18 +260,18 @@ static void
acquire_lock(lk)
struct lockit *lk;
{
- pid_t holder;
+ struct thread *holder;
- if (lk->lkt_held != -1) {
+ if (lk->lkt_held != NOHOLDER) {
holder = lk->lkt_held;
FREE_LOCK(lk);
- if (holder == CURPROC->p_pid)
+ if (holder == curthread)
panic("softdep_lock: locking against myself");
else
- panic("softdep_lock: lock held by %d", holder);
+ panic("softdep_lock: lock held by %p", holder);
}
lk->lkt_spl = splbio();
- lk->lkt_held = CURPROC->p_pid;
+ lk->lkt_held = curthread;
lockcnt++;
}
@@ -279,9 +280,9 @@ free_lock(lk)
struct lockit *lk;
{
- if (lk->lkt_held == -1)
+ if (lk->lkt_held == NOHOLDER)
panic("softdep_unlock: lock not held");
- lk->lkt_held = -1;
+ lk->lkt_held = NOHOLDER;
splx(lk->lkt_spl);
}
@@ -289,18 +290,18 @@ static void
acquire_lock_interlocked(lk)
struct lockit *lk;
{
- pid_t holder;
+ struct thread *holder;
- if (lk->lkt_held != -1) {
+ if (lk->lkt_held != NOHOLDER) {
holder = lk->lkt_held;
FREE_LOCK(lk);
- if (holder == CURPROC->p_pid)
+ if (holder == curthread)
panic("softdep_lock_interlocked: locking against self");
else
- panic("softdep_lock_interlocked: lock held by %d",
+ panic("softdep_lock_interlocked: lock held by %p",
holder);
}
- lk->lkt_held = CURPROC->p_pid;
+ lk->lkt_held = curthread;
lockcnt++;
}
@@ -309,9 +310,9 @@ free_lock_interlocked(lk)
struct lockit *lk;
{
- if (lk->lkt_held == -1)
+ if (lk->lkt_held == NOHOLDER)
panic("softdep_unlock_interlocked: lock not held");
- lk->lkt_held = -1;
+ lk->lkt_held = NOHOLDER;
}
#endif /* DEBUG */
@@ -320,7 +321,7 @@ free_lock_interlocked(lk)
*/
struct sema {
int value;
- pid_t holder;
+ struct thread *holder;
char *name;
int prio;
int timo;
@@ -336,7 +337,7 @@ sema_init(semap, name, prio, timo)
int prio, timo;
{
- semap->holder = -1;
+ semap->holder = NOHOLDER;
semap->value = 0;
semap->name = name;
semap->prio = prio;
@@ -359,7 +360,7 @@ sema_get(semap, interlock)
}
return (0);
}
- semap->holder = CURPROC->p_pid;
+ semap->holder = curthread;
if (interlock != NULL)
FREE_LOCK(interlock);
return (1);
@@ -370,8 +371,8 @@ sema_release(semap)
struct sema *semap;
{
- if (semap->value <= 0 || semap->holder != CURPROC->p_pid) {
- if (lk.lkt_held != -1)
+ if (semap->value <= 0 || semap->holder != curthread) {
+ if (lk.lkt_held != NOHOLDER)
FREE_LOCK(&lk);
panic("sema_release: not held");
}
@@ -379,7 +380,7 @@ sema_release(semap)
semap->value = 0;
wakeup(semap);
}
- semap->holder = -1;
+ semap->holder = NOHOLDER;
}
/*
@@ -412,7 +413,7 @@ worklist_insert(head, item)
struct worklist *item;
{
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("worklist_insert: lock not held");
if (item->wk_state & ONWORKLIST) {
FREE_LOCK(&lk);
@@ -427,7 +428,7 @@ worklist_remove(item)
struct worklist *item;
{
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("worklist_remove: lock not held");
if ((item->wk_state & ONWORKLIST) == 0) {
FREE_LOCK(&lk);
@@ -444,12 +445,12 @@ workitem_free(item, type)
{
if (item->wk_state & ONWORKLIST) {
- if (lk.lkt_held != -1)
+ if (lk.lkt_held != NOHOLDER)
FREE_LOCK(&lk);
panic("workitem_free: still on list");
}
if (item->wk_type != type) {
- if (lk.lkt_held != -1)
+ if (lk.lkt_held != NOHOLDER)
FREE_LOCK(&lk);
panic("workitem_free: type mismatch");
}
@@ -469,7 +470,7 @@ static int tickdelay = 2; /* number of ticks to pause during slowdown */
static int proc_waiting; /* tracks whether we have a timeout posted */
static int *stat_countp; /* statistic to count in proc_waiting timeout */
static struct callout_handle handle; /* handle on posted proc_waiting timeout */
-static struct proc *filesys_syncer; /* proc of filesystem syncer process */
+static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps; /* syncer process flush some inodedeps */
#define FLUSH_INODES 1
static int req_clear_remove; /* syncer process flush some freeblks */
@@ -518,7 +519,7 @@ add_to_worklist(wk)
static struct worklist *worklist_tail;
if (wk->wk_state & ONWORKLIST) {
- if (lk.lkt_held != -1)
+ if (lk.lkt_held != NOHOLDER)
FREE_LOCK(&lk);
panic("add_to_worklist: already on list");
}
@@ -544,7 +545,7 @@ int
softdep_process_worklist(matchmnt)
struct mount *matchmnt;
{
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
int matchcnt, loopcount;
long starttime;
@@ -552,7 +553,7 @@ softdep_process_worklist(matchmnt)
* Record the process identifier of our caller so that we can give
* this process preferential treatment in request_cleanup below.
*/
- filesys_syncer = p;
+ filesys_syncer = td;
matchcnt = 0;
/*
@@ -571,12 +572,12 @@ softdep_process_worklist(matchmnt)
* If requested, try removing inode or removal dependencies.
*/
if (req_clear_inodedeps) {
- clear_inodedeps(p);
+ clear_inodedeps(td);
req_clear_inodedeps -= 1;
wakeup_one(&proc_waiting);
}
if (req_clear_remove) {
- clear_remove(p);
+ clear_remove(td);
req_clear_remove -= 1;
wakeup_one(&proc_waiting);
}
@@ -598,12 +599,12 @@ softdep_process_worklist(matchmnt)
* If requested, try removing inode or removal dependencies.
*/
if (req_clear_inodedeps) {
- clear_inodedeps(p);
+ clear_inodedeps(td);
req_clear_inodedeps -= 1;
wakeup_one(&proc_waiting);
}
if (req_clear_remove) {
- clear_remove(p);
+ clear_remove(td);
req_clear_remove -= 1;
wakeup_one(&proc_waiting);
}
@@ -658,7 +659,7 @@ process_worklist_item(matchmnt, flags)
dirrem = WK_DIRREM(wk);
vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
dirrem->dm_oldinum);
- if (vp == NULL || !VOP_ISLOCKED(vp, CURPROC))
+ if (vp == NULL || !VOP_ISLOCKED(vp, curthread))
break;
}
if (wk == 0) {
@@ -751,10 +752,10 @@ softdep_move_dependencies(oldbp, newbp)
* Purge the work list of all items associated with a particular mount point.
*/
int
-softdep_flushworklist(oldmnt, countp, p)
+softdep_flushworklist(oldmnt, countp, td)
struct mount *oldmnt;
int *countp;
- struct proc *p;
+ struct thread *td;
{
struct vnode *devvp;
int count, error = 0;
@@ -778,9 +779,9 @@ softdep_flushworklist(oldmnt, countp, p)
devvp = VFSTOUFS(oldmnt)->um_devvp;
while ((count = softdep_process_worklist(oldmnt)) > 0) {
*countp += count;
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_FSYNC(devvp, p->p_ucred, MNT_WAIT, p);
- VOP_UNLOCK(devvp, 0, p);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_FSYNC(devvp, td->td_proc->p_ucred, MNT_WAIT, td);
+ VOP_UNLOCK(devvp, 0, td);
if (error)
break;
}
@@ -794,10 +795,10 @@ softdep_flushworklist(oldmnt, countp, p)
* Flush all vnodes and worklist items associated with a specified mount point.
*/
int
-softdep_flushfiles(oldmnt, flags, p)
+softdep_flushfiles(oldmnt, flags, td)
struct mount *oldmnt;
int flags;
- struct proc *p;
+ struct thread *td;
{
int error, count, loopcnt;
@@ -812,9 +813,9 @@ softdep_flushfiles(oldmnt, flags, p)
* Do another flush in case any vnodes were brought in
* as part of the cleanup operations.
*/
- if ((error = ffs_flushfiles(oldmnt, flags, p)) != 0)
+ if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
break;
- if ((error = softdep_flushworklist(oldmnt, &count, p)) != 0 ||
+ if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 ||
count == 0)
break;
}
@@ -885,7 +886,7 @@ pagedep_lookup(ip, lbn, flags, pagedeppp)
int i;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("pagedep_lookup: lock not held");
#endif
mp = ITOV(ip)->v_mount;
@@ -953,7 +954,7 @@ inodedep_lookup(fs, inum, flags, inodedeppp)
int firsttry;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("inodedep_lookup: lock not held");
#endif
firsttry = 1;
@@ -1226,7 +1227,7 @@ bmsafemap_lookup(bp)
struct worklist *wk;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("bmsafemap_lookup: lock not held");
#endif
LIST_FOREACH(wk, &bp->b_dep, wk_list)
@@ -1398,7 +1399,7 @@ allocdirect_merge(adphead, newadp, oldadp)
struct newdirblk *newdirblk;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("allocdirect_merge: lock not held");
#endif
if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
@@ -1986,7 +1987,7 @@ free_allocdirect(adphead, adp, delay)
struct worklist *wk;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("free_allocdirect: lock not held");
#endif
if ((adp->ad_state & DEPCOMPLETE) == 0)
@@ -2028,7 +2029,7 @@ free_newdirblk(newdirblk)
int i;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("free_newdirblk: lock not held");
#endif
/*
@@ -2351,7 +2352,7 @@ free_allocindir(aip, inodedep)
struct freefrag *freefrag;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("free_allocindir: lock not held");
#endif
if ((aip->ai_state & DEPCOMPLETE) == 0)
@@ -2609,7 +2610,7 @@ free_diradd(dap)
struct mkdir *mkdir, *nextmd;
#ifdef DEBUG
- if (lk.lkt_held == -1)
+ if (lk.lkt_held == NOHOLDER)
panic("free_diradd: lock not held");
#endif
WORKLIST_REMOVE(&dap->da_list);
@@ -2989,7 +2990,7 @@ static void
handle_workitem_remove(dirrem)
struct dirrem *dirrem;
{
- struct proc *p = CURPROC; /* XXX */
+ struct thread *td = curthread;
struct inodedep *inodedep;
struct vnode *vp;
struct inode *ip;
@@ -3038,7 +3039,7 @@ handle_workitem_remove(dirrem)
}
inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
FREE_LOCK(&lk);
- if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, p->p_ucred, p)) != 0)
+ if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_proc->p_ucred, td)) != 0)
softdep_error("handle_workitem_remove: truncate", error);
/*
* Rename a directory to a new parent. Since, we are both deleting
@@ -3422,9 +3423,10 @@ softdep_disk_write_complete(bp)
struct bmsafemap *bmsafemap;
#ifdef DEBUG
- if (lk.lkt_held != -1)
+#define SPECIAL_FLAG NOHOLDER-1
+ if (lk.lkt_held != NOHOLDER)
panic("softdep_disk_write_complete: lock is held");
- lk.lkt_held = -2;
+ lk.lkt_held = SPECIAL_FLAG;
#endif
LIST_INIT(&reattach);
while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
@@ -3490,7 +3492,7 @@ softdep_disk_write_complete(bp)
case D_INDIRDEP:
indirdep = WK_INDIRDEP(wk);
if (indirdep->ir_state & GOINGAWAY) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("disk_write_complete: indirdep gone");
}
bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
@@ -3501,7 +3503,7 @@ softdep_disk_write_complete(bp)
while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
handle_allocindir_partdone(aip);
if (aip == LIST_FIRST(&indirdep->ir_donehd)) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("disk_write_complete: not gone");
}
}
@@ -3512,7 +3514,7 @@ softdep_disk_write_complete(bp)
continue;
default:
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_disk_write_complete: Unknown type %s",
TYPENAME(wk->wk_type));
/* NOTREACHED */
@@ -3526,9 +3528,9 @@ softdep_disk_write_complete(bp)
WORKLIST_INSERT(&bp->b_dep, wk);
}
#ifdef DEBUG
- if (lk.lkt_held != -2)
+ if (lk.lkt_held != SPECIAL_FLAG)
panic("softdep_disk_write_complete: lock lost");
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
#endif
}
@@ -3548,7 +3550,7 @@ handle_allocdirect_partdone(adp)
if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
return;
if (adp->ad_buf != NULL) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_allocdirect_partdone: dangling dep");
}
/*
@@ -3586,7 +3588,7 @@ handle_allocdirect_partdone(adp)
if (listadp == adp)
break;
if (listadp == NULL) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_allocdirect_partdone: lost dep");
}
#endif /* DEBUG */
@@ -3622,7 +3624,7 @@ handle_allocindir_partdone(aip)
if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
return;
if (aip->ai_buf != NULL) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_allocindir_partdone: dangling dependency");
}
indirdep = aip->ai_indirdep;
@@ -3656,7 +3658,7 @@ handle_written_inodeblock(inodedep, bp)
int hadchanges;
if ((inodedep->id_state & IOSTARTED) == 0) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: not started");
}
inodedep->id_state &= ~IOSTARTED;
@@ -3687,12 +3689,12 @@ handle_written_inodeblock(inodedep, bp)
for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
nextadp = TAILQ_NEXT(adp, ad_next);
if (adp->ad_state & ATTACHED) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: new entry");
}
if (adp->ad_lbn < NDADDR) {
if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("%s: %s #%ld mismatch %d != %d",
"handle_written_inodeblock",
"direct pointer", adp->ad_lbn,
@@ -3701,7 +3703,7 @@ handle_written_inodeblock(inodedep, bp)
dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
} else {
if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("%s: %s #%ld allocated as %d",
"handle_written_inodeblock",
"indirect pointer", adp->ad_lbn - NDADDR,
@@ -3719,7 +3721,7 @@ handle_written_inodeblock(inodedep, bp)
* Reset the file size to its most up-to-date value.
*/
if (inodedep->id_savedsize == -1) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: bad size");
}
if (dp->di_size != inodedep->id_savedsize) {
@@ -3759,7 +3761,7 @@ handle_written_inodeblock(inodedep, bp)
* have been freed.
*/
if (filefree != NULL) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: filefree");
}
filefree = wk;
@@ -3784,7 +3786,7 @@ handle_written_inodeblock(inodedep, bp)
continue;
default:
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: Unknown type %s",
TYPENAME(wk->wk_type));
/* NOTREACHED */
@@ -3792,7 +3794,7 @@ handle_written_inodeblock(inodedep, bp)
}
if (filefree != NULL) {
if (free_inodedep(inodedep) == 0) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_inodeblock: live inodedep");
}
add_to_worklist(filefree);
@@ -3842,7 +3844,7 @@ handle_written_mkdir(mkdir, type)
struct pagedep *pagedep;
if (mkdir->md_state != type) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_mkdir: bad type");
}
dap = mkdir->md_diradd;
@@ -3879,7 +3881,7 @@ handle_written_filepage(pagedep, bp)
int i, chgs;
if ((pagedep->pd_state & IOSTARTED) == 0) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_filepage: not started");
}
pagedep->pd_state &= ~IOSTARTED;
@@ -3907,7 +3909,7 @@ handle_written_filepage(pagedep, bp)
dap = nextdap) {
nextdap = LIST_NEXT(dap, da_pdlist);
if (dap->da_state & ATTACHED) {
- lk.lkt_held = -1;
+ lk.lkt_held = NOHOLDER;
panic("handle_written_filepage: attached");
}
ep = (struct direct *)
@@ -4124,7 +4126,7 @@ softdep_fsync(vp)
struct inode *ip;
struct buf *bp;
struct fs *fs;
- struct proc *p = CURPROC; /* XXX */
+ struct thread *td = curthread;
int error, flushparent;
ino_t parentino;
ufs_lbn_t lbn;
@@ -4190,9 +4192,9 @@ softdep_fsync(vp)
* ufs_lookup for details on possible races.
*/
FREE_LOCK(&lk);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
error = VFS_VGET(mnt, parentino, &pvp);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (error != 0)
return (error);
/*
@@ -4209,7 +4211,7 @@ softdep_fsync(vp)
return (error);
}
if ((pagedep->pd_state & NEWBLOCK) &&
- (error = VOP_FSYNC(pvp, p->p_ucred, MNT_WAIT, p))) {
+ (error = VOP_FSYNC(pvp, td->td_proc->p_ucred, MNT_WAIT, td))) {
vput(pvp);
return (error);
}
@@ -4217,7 +4219,7 @@ softdep_fsync(vp)
/*
* Flush directory page containing the inode's name.
*/
- error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), p->p_ucred,
+ error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_proc->p_ucred,
&bp);
if (error == 0)
error = BUF_WRITE(bp);
@@ -4294,7 +4296,7 @@ softdep_sync_metadata(ap)
struct vnode *a_vp;
struct ucred *a_cred;
int a_waitfor;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -4539,7 +4541,7 @@ loop:
if (vn_isdisk(vp, NULL) &&
vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) &&
(error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred,
- ap->a_p)) != 0)
+ ap->a_td)) != 0)
return (error);
return (0);
}
@@ -4643,7 +4645,7 @@ flush_pagedep_deps(pvp, mp, diraddhdp)
struct mount *mp;
struct diraddhd *diraddhdp;
{
- struct proc *p = CURPROC; /* XXX */
+ struct thread *td = curthread;
struct inodedep *inodedep;
struct ufsmount *ump;
struct diradd *dap;
@@ -4690,8 +4692,8 @@ flush_pagedep_deps(pvp, mp, diraddhdp)
FREE_LOCK(&lk);
if ((error = VFS_VGET(mp, inum, &vp)) != 0)
break;
- if ((error=VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)) ||
- (error=VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p))) {
+ if ((error=VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td)) ||
+ (error=VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td))) {
vput(vp);
break;
}
@@ -4792,12 +4794,12 @@ request_cleanup(resource, islocked)
int resource;
int islocked;
{
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
/*
* We never hold up the filesystem syncer process.
*/
- if (p == filesys_syncer)
+ if (td == filesys_syncer)
return (0);
/*
* First check to see if the work list has gotten backlogged.
@@ -4891,8 +4893,8 @@ pause_timer(arg)
* reduce the number of dirrem, freefile, and freeblks dependency structures.
*/
static void
-clear_remove(p)
- struct proc *p;
+clear_remove(td)
+ struct thread *td;
{
struct pagedep_hashhead *pagedephd;
struct pagedep *pagedep;
@@ -4920,7 +4922,7 @@ clear_remove(p)
vn_finished_write(mp);
return;
}
- if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)))
+ if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td)))
softdep_error("clear_remove: fsync", error);
drain_output(vp, 0);
vput(vp);
@@ -4936,8 +4938,8 @@ clear_remove(p)
* the number of inodedep dependency structures.
*/
static void
-clear_inodedeps(p)
- struct proc *p;
+clear_inodedeps(td)
+ struct thread *td;
{
struct inodedep_hashhead *inodedephd;
struct inodedep *inodedep;
@@ -4994,10 +4996,10 @@ clear_inodedeps(p)
return;
}
if (ino == lastino) {
- if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p)))
+ if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_WAIT, td)))
softdep_error("clear_inodedeps: fsync1", error);
} else {
- if ((error = VOP_FSYNC(vp, p->p_ucred, MNT_NOWAIT, p)))
+ if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td)))
softdep_error("clear_inodedeps: fsync2", error);
drain_output(vp, 0);
}
diff --git a/sys/ufs/ffs/ffs_softdep_stub.c b/sys/ufs/ffs/ffs_softdep_stub.c
index 7511533..c47fdb5 100644
--- a/sys/ufs/ffs/ffs_softdep_stub.c
+++ b/sys/ufs/ffs/ffs_softdep_stub.c
@@ -55,10 +55,10 @@
#include <ufs/ufs/ufs_extern.h>
int
-softdep_flushfiles(oldmnt, flags, p)
+softdep_flushfiles(oldmnt, flags, td)
struct mount *oldmnt;
int flags;
- struct proc *p;
+ struct thread *td;
{
panic("softdep_flushfiles called");
@@ -244,10 +244,10 @@ softdep_fsync_mountdev(vp)
}
int
-softdep_flushworklist(oldmnt, countp, p)
+softdep_flushworklist(oldmnt, countp, td)
struct mount *oldmnt;
int *countp;
- struct proc *p;
+ struct thread *td;
{
*countp = 0;
@@ -260,7 +260,7 @@ softdep_sync_metadata(ap)
struct vnode *a_vp;
struct ucred *a_cred;
int a_waitfor;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index f741036..48a8ba97 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -67,7 +67,7 @@
static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");
static int ffs_sbupdate __P((struct ufsmount *, int));
-int ffs_reload __P((struct mount *,struct ucred *,struct proc *));
+int ffs_reload __P((struct mount *,struct ucred *,struct thread *));
static int ffs_oldfscompat __P((struct fs *));
static int ffs_init __P((struct vfsconf *));
@@ -132,12 +132,12 @@ VFS_SET(ufs_vfsops, ufs, 0);
* namei() if it is a genuine NULL from the user.
*/
int
-ffs_mount(mp, path, data, ndp, p)
+ffs_mount(mp, path, data, ndp, td)
struct mount *mp; /* mount struct pointer*/
char *path; /* path to mount point*/
caddr_t data; /* arguments to FS specific mount*/
struct nameidata *ndp; /* mount point credentials*/
- struct proc *p; /* process requesting mount*/
+ struct thread *td; /* process requesting mount*/
{
size_t size;
struct vnode *devvp;
@@ -156,10 +156,10 @@ ffs_mount(mp, path, data, ndp, p)
return (error);
}
- if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0)
+ if ((error = ffs_mountfs(rootvp, mp, td, M_FFSNODE)) != 0)
return (error);
- (void)VFS_STATFS(mp, &mp->mnt_stat, p);
+ (void)VFS_STATFS(mp, &mp->mnt_stat, td);
return (0);
}
@@ -184,9 +184,9 @@ ffs_mount(mp, path, data, ndp, p)
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
if (mp->mnt_flag & MNT_SOFTDEP) {
- error = softdep_flushfiles(mp, flags, p);
+ error = softdep_flushfiles(mp, flags, td);
} else {
- error = ffs_flushfiles(mp, flags, p);
+ error = ffs_flushfiles(mp, flags, td);
}
if (error) {
vn_finished_write(mp);
@@ -212,21 +212,21 @@ ffs_mount(mp, path, data, ndp, p)
vn_finished_write(mp);
}
if ((mp->mnt_flag & MNT_RELOAD) &&
- (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0)
+ (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, td)) != 0)
return (error);
if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
/*
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
- if (p->p_ucred->cr_uid != 0) {
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
+ if (td->td_proc->p_ucred->cr_uid != 0) {
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
- p->p_ucred, p)) != 0) {
- VOP_UNLOCK(devvp, 0, p);
+ td->td_proc->p_ucred, td)) != 0) {
+ VOP_UNLOCK(devvp, 0, td);
return (error);
}
- VOP_UNLOCK(devvp, 0, p);
+ VOP_UNLOCK(devvp, 0, td);
}
fs->fs_flags &= ~FS_UNCLEAN;
if (fs->fs_clean == 0) {
@@ -253,7 +253,7 @@ ffs_mount(mp, path, data, ndp, p)
}
/* check to see if we need to start softdep */
if ((fs->fs_flags & FS_DOSOFTDEP) &&
- (error = softdep_mount(devvp, mp, fs, p->p_ucred))){
+ (error = softdep_mount(devvp, mp, fs, td->td_proc->p_ucred))){
vn_finished_write(mp);
return (error);
}
@@ -286,7 +286,7 @@ ffs_mount(mp, path, data, ndp, p)
* Not an update, or updating the name: look up the name
* and verify that it refers to a sensible block device.
*/
- NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
+ NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, td);
if ((error = namei(ndp)) != 0)
return (error);
NDFREE(ndp, NDF_ONLY_PNBUF);
@@ -300,16 +300,16 @@ ffs_mount(mp, path, data, ndp, p)
* If mount by non-root, then verify that user has necessary
* permissions on the device.
*/
- if (p->p_ucred->cr_uid != 0) {
+ if (td->td_proc->p_ucred->cr_uid != 0) {
accessmode = VREAD;
if ((mp->mnt_flag & MNT_RDONLY) == 0)
accessmode |= VWRITE;
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ if ((error = VOP_ACCESS(devvp, accessmode, td->td_proc->p_ucred, td))!= 0){
vput(devvp);
return (error);
}
- VOP_UNLOCK(devvp, 0, p);
+ VOP_UNLOCK(devvp, 0, td);
}
if (mp->mnt_flag & MNT_UPDATE) {
@@ -335,7 +335,7 @@ ffs_mount(mp, path, data, ndp, p)
* the mount point is discarded by the upper level code.
* Note that vfs_mount() populates f_mntonname for us.
*/
- if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) {
+ if ((error = ffs_mountfs(devvp, mp, td, M_FFSNODE)) != 0) {
vrele(devvp);
return (error);
}
@@ -348,7 +348,7 @@ ffs_mount(mp, path, data, ndp, p)
/*
* Initialize filesystem stat information in mount struct.
*/
- (void)VFS_STATFS(mp, &mp->mnt_stat, p);
+ (void)VFS_STATFS(mp, &mp->mnt_stat, td);
return (0);
}
@@ -366,10 +366,10 @@ ffs_mount(mp, path, data, ndp, p)
* 6) re-read inode data for all active vnodes.
*/
int
-ffs_reload(mp, cred, p)
+ffs_reload(mp, cred, td)
register struct mount *mp;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
register struct vnode *vp, *nvp, *devvp;
struct inode *ip;
@@ -387,9 +387,9 @@ ffs_reload(mp, cred, p)
* Step 1: invalidate all cached meta-data.
*/
devvp = VFSTOUFS(mp)->um_devvp;
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = vinvalbuf(devvp, 0, cred, p, 0, 0);
- VOP_UNLOCK(devvp, 0, p);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = vinvalbuf(devvp, 0, cred, td, 0, 0);
+ VOP_UNLOCK(devvp, 0, td);
if (error)
panic("ffs_reload: dirty1");
@@ -400,16 +400,16 @@ ffs_reload(mp, cred, p)
* block device.
*/
if (vn_isdisk(devvp, NULL)) {
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- vfs_object_create(devvp, p, p->p_ucred);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ vfs_object_create(devvp, td, td->td_proc->p_ucred);
mtx_lock(&devvp->v_interlock);
- VOP_UNLOCK(devvp, LK_INTERLOCK, p);
+ VOP_UNLOCK(devvp, LK_INTERLOCK, td);
}
/*
* Step 2: re-read superblock from disk.
*/
- if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
+ if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, td) != 0)
size = DEV_BSIZE;
else
size = dpart.disklab->d_secsize;
@@ -486,16 +486,16 @@ loop:
/*
* Step 4: invalidate all inactive vnodes.
*/
- if (vrecycle(vp, NULL, p))
+ if (vrecycle(vp, NULL, td))
goto loop;
/*
* Step 5: invalidate all cached file data.
*/
mtx_lock(&vp->v_interlock);
- if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
goto loop;
}
- if (vinvalbuf(vp, 0, cred, p, 0, 0))
+ if (vinvalbuf(vp, 0, cred, td, 0, 0))
panic("ffs_reload: dirty2");
/*
* Step 6: re-read inode data for all active vnodes.
@@ -527,10 +527,10 @@ SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
* Common code for mount and mountroot
*/
int
-ffs_mountfs(devvp, mp, p, malloctype)
+ffs_mountfs(devvp, mp, td, malloctype)
register struct vnode *devvp;
struct mount *mp;
- struct proc *p;
+ struct thread *td;
struct malloc_type *malloctype;
{
register struct ufsmount *ump;
@@ -547,7 +547,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
int ncount;
dev = devvp->v_rdev;
- cred = p ? p->p_ucred : NOCRED;
+ cred = td ? td->td_proc->p_ucred : NOCRED;
/*
* Disallow multiple mounts of the same device.
* Disallow mounting of a device that is currently in use
@@ -561,9 +561,9 @@ ffs_mountfs(devvp, mp, p, malloctype)
if (ncount > 1 && devvp != rootvp)
return (EBUSY);
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
- VOP_UNLOCK(devvp, 0, p);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = vinvalbuf(devvp, V_SAVE, cred, td, 0, 0);
+ VOP_UNLOCK(devvp, 0, td);
if (error)
return (error);
@@ -574,16 +574,16 @@ ffs_mountfs(devvp, mp, p, malloctype)
* increases the opportunity for metadata caching.
*/
if (vn_isdisk(devvp, NULL)) {
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- vfs_object_create(devvp, p, cred);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ vfs_object_create(devvp, td, cred);
mtx_lock(&devvp->v_interlock);
- VOP_UNLOCK(devvp, LK_INTERLOCK, p);
+ VOP_UNLOCK(devvp, LK_INTERLOCK, td);
}
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
- VOP_UNLOCK(devvp, 0, p);
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, td);
+ VOP_UNLOCK(devvp, 0, td);
if (error)
return (error);
if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
@@ -591,7 +591,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
if (mp->mnt_iosize_max > MAXPHYS)
mp->mnt_iosize_max = MAXPHYS;
- if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
+ if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, td) != 0)
size = DEV_BSIZE;
else
size = dpart.disklab->d_secsize;
@@ -767,7 +767,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
* This would all happen while the file system was busy/not
* available, so would effectively be "atomic".
*/
- (void) ufs_extattr_autostart(mp, p);
+ (void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
return (0);
@@ -775,7 +775,7 @@ out:
devvp->v_rdev->si_mountpoint = NULL;
if (bp)
brelse(bp);
- (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
+ (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, td);
if (ump) {
free(ump->um_fs, M_UFSMNT);
free(ump, M_UFSMNT);
@@ -820,10 +820,10 @@ ffs_oldfscompat(fs)
* unmount system call
*/
int
-ffs_unmount(mp, mntflags, p)
+ffs_unmount(mp, mntflags, td)
struct mount *mp;
int mntflags;
- struct proc *p;
+ struct thread *td;
{
register struct ufsmount *ump = VFSTOUFS(mp);
register struct fs *fs;
@@ -834,7 +834,7 @@ ffs_unmount(mp, mntflags, p)
flags |= FORCECLOSE;
}
#ifdef UFS_EXTATTR
- if ((error = ufs_extattr_stop(mp, p))) {
+ if ((error = ufs_extattr_stop(mp, td))) {
if (error != EOPNOTSUPP)
printf("ffs_unmount: ufs_extattr_stop returned %d\n",
error);
@@ -843,10 +843,10 @@ ffs_unmount(mp, mntflags, p)
}
#endif
if (mp->mnt_flag & MNT_SOFTDEP) {
- if ((error = softdep_flushfiles(mp, flags, p)) != 0)
+ if ((error = softdep_flushfiles(mp, flags, td)) != 0)
return (error);
} else {
- if ((error = ffs_flushfiles(mp, flags, p)) != 0)
+ if ((error = ffs_flushfiles(mp, flags, td)) != 0)
return (error);
}
fs = ump->um_fs;
@@ -870,9 +870,9 @@ ffs_unmount(mp, mntflags, p)
}
ump->um_devvp->v_rdev->si_mountpoint = NULL;
- vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
+ vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, td, 0, 0);
error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
- NOCRED, p);
+ NOCRED, td);
vrele(ump->um_devvp);
@@ -888,10 +888,10 @@ ffs_unmount(mp, mntflags, p)
* Flush out all the files in a filesystem.
*/
int
-ffs_flushfiles(mp, flags, p)
+ffs_flushfiles(mp, flags, td)
register struct mount *mp;
int flags;
- struct proc *p;
+ struct thread *td;
{
register struct ufsmount *ump;
int error;
@@ -906,7 +906,7 @@ ffs_flushfiles(mp, flags, p)
for (i = 0; i < MAXQUOTAS; i++) {
if (ump->um_quotas[i] == NULLVP)
continue;
- quotaoff(p, mp, i);
+ quotaoff(td, mp, i);
}
/*
* Here we fall through to vflush again to ensure
@@ -931,9 +931,9 @@ ffs_flushfiles(mp, flags, p)
/*
* Flush filesystem metadata.
*/
- vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
- VOP_UNLOCK(ump->um_devvp, 0, p);
+ vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_FSYNC(ump->um_devvp, td->td_proc->p_ucred, MNT_WAIT, td);
+ VOP_UNLOCK(ump->um_devvp, 0, td);
return (error);
}
@@ -941,10 +941,10 @@ ffs_flushfiles(mp, flags, p)
* Get file system statistics.
*/
int
-ffs_statfs(mp, sbp, p)
+ffs_statfs(mp, sbp, td)
struct mount *mp;
register struct statfs *sbp;
- struct proc *p;
+ struct thread *td;
{
register struct ufsmount *ump;
register struct fs *fs;
@@ -980,11 +980,11 @@ ffs_statfs(mp, sbp, p)
* Note: we are always called with the filesystem marked `MPBUSY'.
*/
int
-ffs_sync(mp, waitfor, cred, p)
+ffs_sync(mp, waitfor, cred, td)
struct mount *mp;
int waitfor;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
struct vnode *nvp, *vp, *devvp;
struct inode *ip;
@@ -1028,15 +1028,15 @@ loop:
continue;
}
if (vp->v_type != VCHR) {
- if ((error = vget(vp, lockreq, p)) != 0) {
+ if ((error = vget(vp, lockreq, td)) != 0) {
mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
}
- if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0)
+ if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
allerror = error;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vrele(vp);
} else {
mtx_unlock(&vp->v_interlock);
@@ -1049,7 +1049,7 @@ loop:
* Force stale file system control information to be flushed.
*/
if (waitfor == MNT_WAIT) {
- if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
+ if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
allerror = error;
/* Flushed work items may create new vnodes to clean */
if (count) {
@@ -1065,10 +1065,10 @@ loop:
if (waitfor != MNT_LAZY &&
(devvp->v_numoutput > 0 || TAILQ_FIRST(&devvp->v_dirtyblkhd))) {
mtx_unlock(&devvp->v_interlock);
- vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
- if ((error = VOP_FSYNC(devvp, cred, waitfor, p)) != 0)
+ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
+ if ((error = VOP_FSYNC(devvp, cred, waitfor, td)) != 0)
allerror = error;
- VOP_UNLOCK(devvp, 0, p);
+ VOP_UNLOCK(devvp, 0, td);
if (waitfor == MNT_WAIT) {
mtx_lock(&mntvnode_mtx);
goto loop;
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index c50fe0f..78c32ea 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -130,7 +130,7 @@ ffs_fsync(ap)
struct vnode *a_vp;
struct ucred *a_cred;
int a_waitfor;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -147,7 +147,7 @@ ffs_fsync(ap)
* out from underneath us.
*/
if (ip->i_flags & SF_SNAPSHOT)
- VOP_UNLOCK(vp, 0, ap->a_p);
+ VOP_UNLOCK(vp, 0, ap->a_td);
wait = (ap->a_waitfor == MNT_WAIT);
if (vn_isdisk(vp, NULL)) {
lbn = INT_MAX;
@@ -291,6 +291,6 @@ loop:
splx(s);
error = UFS_UPDATE(vp, wait);
if (ip->i_flags & SF_SNAPSHOT)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_td);
return (error);
}