path: root/sys/kern
author    phk <phk@FreeBSD.org>  2005-02-10 12:28:58 +0000
committer phk <phk@FreeBSD.org>  2005-02-10 12:28:58 +0000
commit    5dd8d305754a018754a81424da7eb8d297b122eb (patch)
tree      2b53208819d7c5ad3ab3fcf73a515a5cc7f5009c /sys/kern
parent    dc9f809dd574faccecacee96f8fafcefeb7151aa (diff)
download  FreeBSD-src-5dd8d305754a018754a81424da7eb8d297b122eb.zip
          FreeBSD-src-5dd8d305754a018754a81424da7eb8d297b122eb.tar.gz
Make various vnode related functions static
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_bio.c    6
-rw-r--r--  sys/kern/vfs_subr.c  74
2 files changed, 12 insertions, 68 deletions
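
The commit gives these functions and variables internal linkage. As a minimal stand-alone sketch of what `static` buys here (hypothetical names, not the kernel code in the diff below): a function or object declared static is visible only inside its own translation unit, so no other file can call it, link against its symbol, or clash with the name.

/* linkage.c -- hypothetical example of internal vs. external linkage */
#include <stdio.h>

/* Internal linkage: the symbol is confined to this file, so other
 * translation units cannot call it or accidentally redefine it. */
static void
helper(void)
{
        printf("file-private helper\n");
}

/* External linkage: callable from any file that declares it. */
void
api_entry(void)
{
        helper();
}

Compiled together with another file, a call to helper() from outside linkage.c fails at link time, while api_entry() remains available; that is the effect the diff below applies to bremfreel(), vholdl(), vdropl(), vbusy(), and the sysctl declarations.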
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5039e87..5a50075 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -104,7 +104,7 @@ static int vfs_bio_clcheck(struct vnode *vp, int size,
daddr_t lblkno, daddr_t blkno);
static int flushbufqueues(int flushdeps);
static void buf_daemon(void);
-void bremfreel(struct buf *bp);
+static void bremfreel(struct buf *bp);
int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
@@ -674,7 +674,7 @@ bremfree(struct buf *bp)
* Removes a buffer from the free list, must be called with the
* bqlock held.
*/
-void
+static void
bremfreel(struct buf *bp)
{
int s = splbio();
@@ -2054,7 +2054,7 @@ buf_daemon()
* free up B_INVAL buffers instead of write them, which NFS is
* particularly sensitive to.
*/
-int flushwithdeps = 0;
+static int flushwithdeps = 0;
SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
0, "Number of buffers flushed with dependecies that require rollbacks");
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 1bc0ee0..1bb181f 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -90,7 +90,9 @@ static void syncer_shutdown(void *arg, int howto);
static int vtryrecycle(struct vnode *vp);
static void vx_lock(struct vnode *vp);
static void vx_unlock(struct vnode *vp);
-
+static void vbusy(struct vnode *vp);
+static void vdropl(struct vnode *vp);
+static void vholdl(struct vnode *);
/*
* Enable Giant pushdown based on whether or not the vm is mpsafe in this
@@ -1984,7 +1986,7 @@ vhold(struct vnode *vp)
VI_UNLOCK(vp);
}
-void
+static void
vholdl(struct vnode *vp)
{
@@ -2006,9 +2008,8 @@ vdrop(struct vnode *vp)
VI_UNLOCK(vp);
}
-void
-vdropl(vp)
- struct vnode *vp;
+static void
+vdropl(struct vnode *vp)
{
if (vp->v_holdcnt <= 0)
@@ -2358,8 +2359,6 @@ vgonel(struct vnode *vp, struct thread *td)
*/
vp->v_vnlock = &vp->v_lock;
vp->v_op = &dead_vnodeops;
- if (vp->v_pollinfo != NULL)
- vn_pollgone(vp);
vp->v_tag = "none";
VI_UNLOCK(vp);
@@ -2606,8 +2605,8 @@ vfs_sysctl(SYSCTL_HANDLER_ARGS)
return (EOPNOTSUPP);
}
-SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
- "Generic filesystem");
+static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
+ vfs_sysctl, "Generic filesystem");
#if 1 || defined(COMPAT_PRELITE2)
@@ -2851,7 +2850,7 @@ vfree(struct vnode *vp)
/*
* Opposite of vfree() - mark a vnode as in use.
*/
-void
+static void
vbusy(struct vnode *vp)
{
@@ -2924,61 +2923,6 @@ vn_pollrecord(vp, td, events)
}
/*
- * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
- * it is possible for us to miss an event due to race conditions, but
- * that condition is expected to be rare, so for the moment it is the
- * preferred interface.
- */
-void
-vn_pollevent(vp, events)
- struct vnode *vp;
- short events;
-{
-
- if (vp->v_pollinfo == NULL)
- v_addpollinfo(vp);
- mtx_lock(&vp->v_pollinfo->vpi_lock);
- if (vp->v_pollinfo->vpi_events & events) {
- /*
- * We clear vpi_events so that we don't
- * call selwakeup() twice if two events are
- * posted before the polling process(es) is
- * awakened. This also ensures that we take at
- * most one selwakeup() if the polling process
- * is no longer interested. However, it does
- * mean that only one event can be noticed at
- * a time. (Perhaps we should only clear those
- * event bits which we note?) XXX
- */
- vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
- vp->v_pollinfo->vpi_revents |= events;
- selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
- }
- mtx_unlock(&vp->v_pollinfo->vpi_lock);
-}
-
-/*
- * Wake up anyone polling on vp because it is being revoked.
- * This depends on dead_poll() returning POLLHUP for correct
- * behavior.
- */
-void
-vn_pollgone(vp)
- struct vnode *vp;
-{
-
- mtx_lock(&vp->v_pollinfo->vpi_lock);
- VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
- if (vp->v_pollinfo->vpi_events) {
- vp->v_pollinfo->vpi_events = 0;
- selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
- }
- mtx_unlock(&vp->v_pollinfo->vpi_lock);
-}
-
-
-
-/*
* Routine to create and manage a filesystem syncer vnode.
*/
#define sync_close ((int (*)(struct vop_close_args *))nullop)
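
The same pattern applies to data objects in the diff above: flushwithdeps becomes a file-private counter in vfs_bio.c while the adjacent SYSCTL_INT still exposes its value. A rough user-space analogue of that arrangement, with hypothetical names rather than the kernel code:

/* counter.c -- hypothetical sketch: file-private state behind an exported accessor */
static int flush_count;         /* internal linkage: only this file touches it directly */

void
note_flush(void)
{
        flush_count++;
}

int
get_flush_count(void)           /* exported reader, loosely analogous to the sysctl export */
{
        return (flush_count);
}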