author		wollman <wollman@FreeBSD.org>	1997-12-15 03:09:59 +0000
committer	wollman <wollman@FreeBSD.org>	1997-12-15 03:09:59 +0000
commit		1a06d8809828f2017adc8b31c76d4f07c3a92169 (patch)
tree		de4e7dc4bf84fe154d0df8264982bcf11791b066 /sys
parent		b7cdca40ba7917454003700ecf7d97d1c402ac47 (diff)
Add support for poll(2) on files. vop_nopoll() now returns POLLNVAL
if one of the new poll types is requested; hopefully this will not
break any existing code.  (This is done so that programs have a
dependable way of determining whether a filesystem supports the
extended poll types or not.)

The new poll types added are:
        POLLWRITE - file contents may have been modified
        POLLNLINK - file was linked, unlinked, or renamed
        POLLATTRIB - file's attributes may have been changed
        POLLEXTEND - file was extended

Note that the internal operation of poll() means that it is impossible
for two processes to reliably poll for the same event (this could be
fixed but may not be worth it), so it is not possible to rewrite
`tail -f' to use poll at this time.
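
For illustration (not part of this commit), a minimal userland sketch of how a
program might use the new interface: it requests only the extended events on a
regular file, treats POLLNVAL as "this filesystem lacks the extension" per the
vop_nopoll() change below, and otherwise reports whichever events fired.  The
program name, file argument, and ten-second timeout are arbitrary.

/*
 * pollfile.c -- illustrative probe for the extended poll(2) events.
 */
#include <sys/poll.h>

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	struct pollfd pfd;
	int n;

	if (argc != 2) {
		fprintf(stderr, "usage: pollfile file\n");
		exit(1);
	}
	pfd.fd = open(argv[1], O_RDONLY);
	if (pfd.fd < 0) {
		perror(argv[1]);
		exit(1);
	}
	pfd.events = POLLWRITE | POLLEXTEND | POLLATTRIB | POLLNLINK;
	pfd.revents = 0;

	n = poll(&pfd, 1, 10000);	/* wait up to ten seconds */
	if (n < 0) {
		perror("poll");
		exit(1);
	}
	if (pfd.revents & POLLNVAL)
		printf("extended poll events not supported on this filesystem\n");
	else if (n == 0)
		printf("no events within the timeout\n");
	else {
		if (pfd.revents & POLLWRITE)
			printf("file contents may have been modified\n");
		if (pfd.revents & POLLEXTEND)
			printf("file was extended\n");
		if (pfd.revents & POLLATTRIB)
			printf("file attributes may have been changed\n");
		if (pfd.revents & POLLNLINK)
			printf("file was linked, unlinked, or renamed\n");
	}
	close(pfd.fd);
	return (0);
}

Remember the caveat above: only one process can reliably collect a given
event, so a tool like `tail -f' still cannot be built on this.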
Diffstat (limited to 'sys')
-rw-r--r--sys/fs/deadfs/dead_vnops.c25
-rw-r--r--sys/kern/vfs_default.c25
-rw-r--r--sys/kern/vfs_export.c86
-rw-r--r--sys/kern/vfs_subr.c86
-rw-r--r--sys/miscfs/deadfs/dead_vnops.c25
-rw-r--r--sys/sys/poll.h14
-rw-r--r--sys/sys/vnode.h20
-rw-r--r--sys/ufs/ufs/ufs_readwrite.c10
-rw-r--r--sys/ufs/ufs/ufs_vnops.c19
9 files changed, 291 insertions, 19 deletions
diff --git a/sys/fs/deadfs/dead_vnops.c b/sys/fs/deadfs/dead_vnops.c
index 99f58a6..fb11c2a 100644
--- a/sys/fs/deadfs/dead_vnops.c
+++ b/sys/fs/deadfs/dead_vnops.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)dead_vnops.c 8.1 (Berkeley) 6/10/93
- * $Id: dead_vnops.c,v 1.21 1997/10/26 20:55:10 phk Exp $
+ * $Id: dead_vnops.c,v 1.22 1997/12/05 19:55:41 bde Exp $
*/
#include <sys/param.h>
@@ -40,20 +40,22 @@
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/buf.h>
+#include <sys/poll.h>
static int chkvnlock __P((struct vnode *));
/*
* Prototypes for dead operations on vnodes.
*/
static int dead_badop __P((void));
+static int dead_bmap __P((struct vop_bmap_args *));
+static int dead_ioctl __P((struct vop_ioctl_args *));
+static int dead_lock __P((struct vop_lock_args *));
static int dead_lookup __P((struct vop_lookup_args *));
static int dead_open __P((struct vop_open_args *));
+static int dead_poll __P((struct vop_poll_args *));
+static int dead_print __P((struct vop_print_args *));
static int dead_read __P((struct vop_read_args *));
static int dead_write __P((struct vop_write_args *));
-static int dead_ioctl __P((struct vop_ioctl_args *));
-static int dead_lock __P((struct vop_lock_args *));
-static int dead_bmap __P((struct vop_bmap_args *));
-static int dead_print __P((struct vop_print_args *));
vop_t **dead_vnodeop_p;
static struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
@@ -73,6 +75,7 @@ static struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
{ &vop_mmap_desc, (vop_t *) dead_badop },
{ &vop_open_desc, (vop_t *) dead_open },
{ &vop_pathconf_desc, (vop_t *) vop_ebadf }, /* per pathconf(2) */
+ { &vop_poll_desc, (vop_t *) dead_poll },
{ &vop_print_desc, (vop_t *) dead_print },
{ &vop_read_desc, (vop_t *) dead_read },
{ &vop_readdir_desc, (vop_t *) vop_ebadf },
@@ -288,3 +291,15 @@ chkvnlock(vp)
}
return (locked);
}
+
+/*
+ * Trivial poll routine that always returns POLLHUP.
+ * This is necessary so that a process which is polling a file
+ * gets notified when that file is revoke()d.
+ */
+static int
+dead_poll(ap)
+ struct vop_poll_args *ap;
+{
+ return (POLLHUP);
+}
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index adebcc7..59f6af4 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -231,13 +231,34 @@ vop_nopoll(ap)
struct proc *a_p;
} */ *ap;
{
-
/*
- * Just return what we were asked for.
+ * Return true for read/write. If the user asked for something
+ * special, return POLLNVAL, so that clients have a way of
+ * determining reliably whether or not the extended
+ * functionality is present without hard-coding knowledge
+ * of specific filesystem implementations.
*/
+ if (ap->a_events & ~POLLSTANDARD)
+ return (POLLNVAL);
+
return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
+/*
+ * Implement poll for local filesystems that support it.
+ */
+int
+vop_stdpoll(ap)
+ struct vop_poll_args /* {
+ struct vnode *a_vp;
+ int a_events;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
+}
+
int
vop_stdbwrite(ap)
struct vop_bwrite_args *ap;
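
A filesystem opts into the extended events simply by pointing its vop_poll
entry at vop_stdpoll() instead of inheriting vop_nopoll(); the ufs_vnops.c hunk
at the end of this commit does exactly that.  As a hedged sketch (the "foofs"
names are hypothetical and the rest of the operations table is elided to a
default entry), the wiring looks like this; events would then be posted with
the VN_POLLEVENT() macro added in vnode.h below.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

vop_t **foofs_vnodeop_p;
static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	/* Record pollers on our vnodes via vn_pollrecord(). */
	{ &vop_poll_desc, (vop_t *) vop_stdpoll },
	{ NULL, NULL }
};
static struct vnodeopv_desc foofs_vnodeop_opv_desc =
	{ &foofs_vnodeop_p, foofs_vnodeop_entries };

VNODEOP_SET(foofs_vnodeop_opv_desc);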
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 254e9a7..c978bbf 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.113 1997/11/12 05:42:15 julian Exp $
+ * $Id: vfs_subr.c,v 1.114 1997/11/22 08:35:39 bde Exp $
*/
/*
@@ -1207,6 +1207,7 @@ vclean(vp, flags, p)
* Done with purge, notify sleepers of the grim news.
*/
vp->v_op = dead_vnodeop_p;
+ vn_pollgone(vp);
vp->v_tag = VT_NON;
vp->v_flag &= ~VXLOCK;
if (vp->v_flag & VXWANT) {
@@ -2111,3 +2112,86 @@ vbusy(vp)
simple_unlock(&vnode_free_list_slock);
vp->v_flag &= ~VFREE;
}
+
+/*
+ * Record a process's interest in events which might happen to
+ * a vnode. Because poll uses the historic select-style interface
+ * internally, this routine serves as both the ``check for any
+ * pending events'' and the ``record my interest in future events''
+ * functions. (These are done together, while the lock is held,
+ * to avoid race conditions.)
+ */
+int
+vn_pollrecord(vp, p, events)
+ struct vnode *vp;
+ struct proc *p;
+ short events;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_revents & events) {
+ /*
+ * This leaves events we are not interested
+ * in available for the other process which
+ * presumably had requested them
+ * (otherwise they would never have been
+ * recorded).
+ */
+ events &= vp->v_pollinfo.vpi_revents;
+ vp->v_pollinfo.vpi_revents &= ~events;
+
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+ return events;
+ }
+ vp->v_pollinfo.vpi_events |= events;
+ selrecord(p, &vp->v_pollinfo.vpi_selinfo);
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+ return 0;
+}
+
+/*
+ * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
+ * it is possible for us to miss an event due to race conditions, but
+ * that condition is expected to be rare, so for the moment it is the
+ * preferred interface.
+ */
+void
+vn_pollevent(vp, events)
+ struct vnode *vp;
+ short events;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_events & events) {
+ /*
+ * We clear vpi_events so that we don't
+ * call selwakeup() twice if two events are
+ * posted before the polling process(es) is
+ * awakened. This also ensures that we take at
+ * most one selwakeup() if the polling process
+ * is no longer interested. However, it does
+ * mean that only one event can be noticed at
+ * a time. (Perhaps we should only clear those
+ * event bits which we note?) XXX
+ */
+ vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? */
+ vp->v_pollinfo.vpi_revents |= events;
+ selwakeup(&vp->v_pollinfo.vpi_selinfo);
+ }
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+}
+
+/*
+ * Wake up anyone polling on vp because it is being revoked.
+ * This depends on dead_poll() returning POLLHUP for correct
+ * behavior.
+ */
+void
+vn_pollgone(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_events) {
+ vp->v_pollinfo.vpi_events = 0;
+ selwakeup(&vp->v_pollinfo.vpi_selinfo);
+ }
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 254e9a7..c978bbf 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.113 1997/11/12 05:42:15 julian Exp $
+ * $Id: vfs_subr.c,v 1.114 1997/11/22 08:35:39 bde Exp $
*/
/*
@@ -1207,6 +1207,7 @@ vclean(vp, flags, p)
* Done with purge, notify sleepers of the grim news.
*/
vp->v_op = dead_vnodeop_p;
+ vn_pollgone(vp);
vp->v_tag = VT_NON;
vp->v_flag &= ~VXLOCK;
if (vp->v_flag & VXWANT) {
@@ -2111,3 +2112,86 @@ vbusy(vp)
simple_unlock(&vnode_free_list_slock);
vp->v_flag &= ~VFREE;
}
+
+/*
+ * Record a process's interest in events which might happen to
+ * a vnode. Because poll uses the historic select-style interface
+ * internally, this routine serves as both the ``check for any
+ * pending events'' and the ``record my interest in future events''
+ * functions. (These are done together, while the lock is held,
+ * to avoid race conditions.)
+ */
+int
+vn_pollrecord(vp, p, events)
+ struct vnode *vp;
+ struct proc *p;
+ short events;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_revents & events) {
+ /*
+ * This leaves events we are not interested
+ * in available for the other process which
+ * presumably had requested them
+ * (otherwise they would never have been
+ * recorded).
+ */
+ events &= vp->v_pollinfo.vpi_revents;
+ vp->v_pollinfo.vpi_revents &= ~events;
+
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+ return events;
+ }
+ vp->v_pollinfo.vpi_events |= events;
+ selrecord(p, &vp->v_pollinfo.vpi_selinfo);
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+ return 0;
+}
+
+/*
+ * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
+ * it is possible for us to miss an event due to race conditions, but
+ * that condition is expected to be rare, so for the moment it is the
+ * preferred interface.
+ */
+void
+vn_pollevent(vp, events)
+ struct vnode *vp;
+ short events;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_events & events) {
+ /*
+ * We clear vpi_events so that we don't
+ * call selwakeup() twice if two events are
+ * posted before the polling process(es) is
+ * awakened. This also ensures that we take at
+ * most one selwakeup() if the polling process
+ * is no longer interested. However, it does
+ * mean that only one event can be noticed at
+ * a time. (Perhaps we should only clear those
+ * event bits which we note?) XXX
+ */
+ vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? */
+ vp->v_pollinfo.vpi_revents |= events;
+ selwakeup(&vp->v_pollinfo.vpi_selinfo);
+ }
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+}
+
+/*
+ * Wake up anyone polling on vp because it is being revoked.
+ * This depends on dead_poll() returning POLLHUP for correct
+ * behavior.
+ */
+void
+vn_pollgone(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vp->v_pollinfo.vpi_lock);
+ if (vp->v_pollinfo.vpi_events) {
+ vp->v_pollinfo.vpi_events = 0;
+ selwakeup(&vp->v_pollinfo.vpi_selinfo);
+ }
+ simple_unlock(&vp->v_pollinfo.vpi_lock);
+}
diff --git a/sys/miscfs/deadfs/dead_vnops.c b/sys/miscfs/deadfs/dead_vnops.c
index 99f58a6..fb11c2a 100644
--- a/sys/miscfs/deadfs/dead_vnops.c
+++ b/sys/miscfs/deadfs/dead_vnops.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)dead_vnops.c 8.1 (Berkeley) 6/10/93
- * $Id: dead_vnops.c,v 1.21 1997/10/26 20:55:10 phk Exp $
+ * $Id: dead_vnops.c,v 1.22 1997/12/05 19:55:41 bde Exp $
*/
#include <sys/param.h>
@@ -40,20 +40,22 @@
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/buf.h>
+#include <sys/poll.h>
static int chkvnlock __P((struct vnode *));
/*
* Prototypes for dead operations on vnodes.
*/
static int dead_badop __P((void));
+static int dead_bmap __P((struct vop_bmap_args *));
+static int dead_ioctl __P((struct vop_ioctl_args *));
+static int dead_lock __P((struct vop_lock_args *));
static int dead_lookup __P((struct vop_lookup_args *));
static int dead_open __P((struct vop_open_args *));
+static int dead_poll __P((struct vop_poll_args *));
+static int dead_print __P((struct vop_print_args *));
static int dead_read __P((struct vop_read_args *));
static int dead_write __P((struct vop_write_args *));
-static int dead_ioctl __P((struct vop_ioctl_args *));
-static int dead_lock __P((struct vop_lock_args *));
-static int dead_bmap __P((struct vop_bmap_args *));
-static int dead_print __P((struct vop_print_args *));
vop_t **dead_vnodeop_p;
static struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
@@ -73,6 +75,7 @@ static struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
{ &vop_mmap_desc, (vop_t *) dead_badop },
{ &vop_open_desc, (vop_t *) dead_open },
{ &vop_pathconf_desc, (vop_t *) vop_ebadf }, /* per pathconf(2) */
+ { &vop_poll_desc, (vop_t *) dead_poll },
{ &vop_print_desc, (vop_t *) dead_print },
{ &vop_read_desc, (vop_t *) dead_read },
{ &vop_readdir_desc, (vop_t *) vop_ebadf },
@@ -288,3 +291,15 @@ chkvnlock(vp)
}
return (locked);
}
+
+/*
+ * Trivial poll routine that always returns POLLHUP.
+ * This is necessary so that a process which is polling a file
+ * gets notified when that file is revoke()d.
+ */
+static int
+dead_poll(ap)
+ struct vop_poll_args *ap;
+{
+ return (POLLHUP);
+}
diff --git a/sys/sys/poll.h b/sys/sys/poll.h
index 425ee1d..89468eb 100644
--- a/sys/sys/poll.h
+++ b/sys/sys/poll.h
@@ -25,7 +25,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: poll.h,v 1.1 1997/09/14 02:20:56 peter Exp $
+ * $Id: poll.h,v 1.2 1997/09/14 05:38:03 peter Exp $
*/
#ifndef _SYS_POLL_H_
@@ -63,6 +63,15 @@ struct pollfd {
#define POLLWRBAND 0x0100 /* OOB/Urgent data can be written */
/*
+ * FreeBSD extensions: polling on a regular file might return one
+ * of these events (currently only supported on UFS).
+ */
+#define POLLEXTEND 0x0200 /* file may have been extended */
+#define POLLATTRIB 0x0400 /* file attributes may have changed */
+#define POLLNLINK 0x0800 /* (un)link/rename may have happened */
+#define POLLWRITE 0x1000 /* file's contents may have changed */
+
+/*
* These events are set if they occur regardless of whether they were
* requested.
*/
@@ -70,6 +79,9 @@ struct pollfd {
#define POLLHUP 0x0010 /* file descriptor was "hung up" */
#define POLLNVAL 0x0020 /* requested events "invalid" */
+#define POLLSTANDARD (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
+ POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
+
/*
* Request that poll wait forever.
* XXX this is in stropts.h in SYSV, and not #included by poll.h
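
Because these bits are a FreeBSD extension, source that also builds on other
systems can guard for their absence at compile time and rely on the run-time
POLLNVAL check for filesystems without the support; a hedged sketch:

#include <sys/poll.h>

/* Compile-time fallback where poll.h lacks the FreeBSD extension. */
#ifndef POLLWRITE
#define POLLWRITE	0
#define POLLNLINK	0
#define POLLATTRIB	0
#define POLLEXTEND	0
#endif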
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 08be466..fec331a 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -31,13 +31,14 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
- * $Id: vnode.h,v 1.57 1997/11/22 08:35:43 bde Exp $
+ * $Id: vnode.h,v 1.58 1997/12/05 19:55:49 bde Exp $
*/
#ifndef _SYS_VNODE_H_
#define _SYS_VNODE_H_
#include <sys/queue.h>
+#include <sys/select.h> /* needed for struct selinfo in vnodes */
#include <machine/lock.h>
@@ -78,6 +79,7 @@ struct namecache;
* v_mntvnodes is locked by the global mntvnodes simple lock.
* v_flag, v_usecount, v_holdcount and v_writecount are
* locked by the v_interlock simple lock.
+ * v_pollinfo is locked by the lock contained inside it.
*/
struct vnode {
u_long v_flag; /* vnode flags (see below) */
@@ -114,12 +116,24 @@ struct vnode {
TAILQ_HEAD(, namecache) v_cache_dst; /* Cache entries to us */
struct vnode *v_dd; /* .. vnode */
u_long v_ddid; /* .. capability identifier */
+ struct {
+ struct simplelock vpi_lock; /* lock to protect below */
+ struct selinfo vpi_selinfo; /* identity of poller(s) */
+ short vpi_events; /* what they are looking for */
+ short vpi_revents; /* what has happened */
+ } v_pollinfo;
};
#define v_mountedhere v_un.vu_mountedhere
#define v_socket v_un.vu_socket
#define v_specinfo v_un.vu_specinfo
#define v_fifoinfo v_un.vu_fifoinfo
+#define VN_POLLEVENT(vp, events) \
+ do { \
+ if ((vp)->v_pollinfo.vpi_events & (events)) \
+ vn_pollevent((vp), (events)); \
+ } while (0)
+
/*
* Vnode flags.
*/
@@ -473,6 +487,9 @@ int vn_close __P((struct vnode *vp,
int flags, struct ucred *cred, struct proc *p));
int vn_lock __P((struct vnode *vp, int flags, struct proc *p));
int vn_open __P((struct nameidata *ndp, int fmode, int cmode));
+void vn_pollevent __P((struct vnode *vp, int events));
+void vn_pollgone __P((struct vnode *vp));
+int vn_pollrecord __P((struct vnode *vp, struct proc *p, int events));
int vn_rdwr __P((enum uio_rw rw, struct vnode *vp, caddr_t base,
int len, off_t offset, enum uio_seg segflg, int ioflg,
struct ucred *cred, int *aresid, struct proc *p));
@@ -490,6 +507,7 @@ int vop_nolock __P((struct vop_lock_args *));
int vop_nopoll __P((struct vop_poll_args *));
int vop_nounlock __P((struct vop_unlock_args *));
int vop_stdpathconf __P((struct vop_pathconf_args *));
+int vop_stdpoll __P((struct vop_poll_args *));
int vop_revoke __P((struct vop_revoke_args *));
int vop_sharedlock __P((struct vop_lock_args *));
int vop_eopnotsupp __P((struct vop_generic_args *ap));
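
On the producer side, a filesystem operation posts events through the
VN_POLLEVENT() macro defined above; because the macro tests vpi_events first,
the common case of nobody polling costs one comparison and no function call.
A hedged sketch of a hypothetical remove operation, mirroring the ufs_vnops.c
hunks below ("foofs" and foofs_dirremove() are assumed names, not part of this
commit):

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/poll.h>

static int foofs_dirremove __P((struct vnode *, struct vnode *,
	struct componentname *));

static int
foofs_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;

	error = foofs_dirremove(ap->a_dvp, ap->a_vp, ap->a_cnp);
	if (error == 0) {
		VN_POLLEVENT(ap->a_vp, POLLNLINK);	/* target lost a link */
		VN_POLLEVENT(ap->a_dvp, POLLWRITE);	/* directory was modified */
	}
	return (error);
}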
diff --git a/sys/ufs/ufs/ufs_readwrite.c b/sys/ufs/ufs/ufs_readwrite.c
index f9f6d06..35da9cd 100644
--- a/sys/ufs/ufs/ufs_readwrite.c
+++ b/sys/ufs/ufs/ufs_readwrite.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
- * $Id: ufs_readwrite.c,v 1.32 1997/10/16 10:50:21 phk Exp $
+ * $Id: ufs_readwrite.c,v 1.33 1997/10/16 20:32:39 phk Exp $
*/
#ifdef LFS_READWRITE
@@ -55,6 +55,7 @@
#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
+#include <sys/poll.h>
#endif
/*
@@ -187,9 +188,10 @@ WRITE(ap)
struct proc *p;
ufs_daddr_t lbn;
off_t osize;
- int blkoffset, error, flags, ioflag, resid, size, xfersize;
+ int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
struct timeval tv;
+ extended = 0;
ioflag = ap->a_ioflag;
uio = ap->a_uio;
vp = ap->a_vp;
@@ -264,6 +266,7 @@ WRITE(ap)
if (uio->uio_offset + xfersize > ip->i_size) {
ip->i_size = uio->uio_offset + xfersize;
+ extended = 1;
}
size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
@@ -314,6 +317,9 @@ WRITE(ap)
gettime(&tv);
error = UFS_UPDATE(vp, &tv, &tv, 1);
}
+ if (!error)
+ VN_POLLEVENT(vp, POLLWRITE | (extended ? POLLEXTEND : 0));
+
return (error);
}
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index a160af3..ffec862 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95
- * $Id: ufs_vnops.c,v 1.70 1997/12/12 14:14:44 peter Exp $
+ * $Id: ufs_vnops.c,v 1.71 1997/12/13 12:30:34 bde Exp $
*/
#include "opt_quota.h"
@@ -55,6 +55,7 @@
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/lockf.h>
+#include <sys/poll.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
@@ -135,6 +136,7 @@ ufs_create(ap)
ap->a_dvp, ap->a_vpp, ap->a_cnp);
if (error)
return (error);
+ VN_POLLEVENT(ap->a_dvp, POLLWRITE);
return (0);
}
@@ -160,6 +162,7 @@ ufs_mknod(ap)
ap->a_dvp, vpp, ap->a_cnp);
if (error)
return (error);
+ VN_POLLEVENT(ap->a_dvp, POLLWRITE);
ip = VTOI(*vpp);
ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
if (vap->va_rdev != VNOVAL) {
@@ -469,6 +472,7 @@ ufs_setattr(ap)
return (EROFS);
error = ufs_chmod(vp, (int)vap->va_mode, cred, p);
}
+ VN_POLLEVENT(vp, POLLATTRIB);
return (error);
}
@@ -650,6 +654,8 @@ ufs_remove(ap)
ip->i_nlink--;
ip->i_flag |= IN_CHANGE;
}
+ VN_POLLEVENT(vp, POLLNLINK);
+ VN_POLLEVENT(dvp, POLLWRITE);
out:
if (dvp == vp)
vrele(vp);
@@ -719,6 +725,8 @@ out1:
if (tdvp != vp)
VOP_UNLOCK(vp, 0, p);
out2:
+ VN_POLLEVENT(vp, POLLNLINK);
+ VN_POLLEVENT(tdvp, POLLWRITE);
vput(tdvp);
return (error);
}
@@ -936,6 +944,7 @@ abortit:
oldparent = dp->i_number;
doingdirectory++;
}
+ VN_POLLEVENT(fdvp, POLLWRITE);
vrele(fdvp);
/*
@@ -1030,6 +1039,7 @@ abortit:
}
goto bad;
}
+ VN_POLLEVENT(tdvp, POLLWRITE);
vput(tdvp);
} else {
if (xp->i_dev != dp->i_dev || xp->i_dev != ip->i_dev)
@@ -1085,6 +1095,7 @@ abortit:
dp->i_nlink--;
dp->i_flag |= IN_CHANGE;
}
+ VN_POLLEVENT(tdvp, POLLWRITE);
vput(tdvp);
/*
* Adjust the link count of the target to
@@ -1104,6 +1115,7 @@ abortit:
tcnp->cn_cred, tcnp->cn_proc);
}
xp->i_flag |= IN_CHANGE;
+ VN_POLLEVENT(tvp, POLLNLINK);
vput(tvp);
xp = NULL;
}
@@ -1381,6 +1393,7 @@ ufs_mkdir(ap)
dp->i_nlink--;
dp->i_flag |= IN_CHANGE;
}
+ VN_POLLEVENT(dvp, POLLWRITE);
bad:
/*
* No need to do an explicit VOP_TRUNCATE here, vrele will do this
@@ -1444,6 +1457,7 @@ ufs_rmdir(ap)
error = ufs_dirremove(dvp, cnp);
if (error)
goto out;
+ VN_POLLEVENT(dvp, POLLWRITE|POLLNLINK);
dp->i_nlink--;
dp->i_flag |= IN_CHANGE;
cache_purge(dvp);
@@ -1464,6 +1478,7 @@ ufs_rmdir(ap)
error = UFS_TRUNCATE(vp, (off_t)0, IO_SYNC, cnp->cn_cred,
cnp->cn_proc);
cache_purge(ITOV(ip));
+ VN_POLLEVENT(vp, POLLNLINK);
out:
if (dvp)
vput(dvp);
@@ -1492,6 +1507,7 @@ ufs_symlink(ap)
vpp, ap->a_cnp);
if (error)
return (error);
+ VN_POLLEVENT(ap->a_dvp, POLLWRITE);
vp = *vpp;
len = strlen(ap->a_target);
if (len < vp->v_mount->mnt_maxsymlinklen) {
@@ -2132,6 +2148,7 @@ static struct vnodeopv_entry_desc ufs_vnodeop_entries[] = {
{ &vop_mmap_desc, (vop_t *) ufs_mmap },
{ &vop_open_desc, (vop_t *) ufs_open },
{ &vop_pathconf_desc, (vop_t *) ufs_pathconf },
+ { &vop_poll_desc, (vop_t *) vop_stdpoll },
{ &vop_print_desc, (vop_t *) ufs_print },
{ &vop_readdir_desc, (vop_t *) ufs_readdir },
{ &vop_readlink_desc, (vop_t *) ufs_readlink },