Diffstat (limited to 'fs/pipe.c')
-rw-r--r--  fs/pipe.c  340
1 file changed, 206 insertions, 134 deletions
diff --git a/fs/pipe.c b/fs/pipe.c
index e2f4f1d..7fefb10 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -15,6 +15,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -35,7 +36,7 @@
*/
/* Drop the inode semaphore and wait for a pipe event, atomically */
-void pipe_wait(struct inode * inode)
+void pipe_wait(struct pipe_inode_info *pipe)
{
DEFINE_WAIT(wait);
@@ -43,11 +44,14 @@ void pipe_wait(struct inode * inode)
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
- prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
- mutex_unlock(PIPE_MUTEX(*inode));
+ prepare_to_wait(&pipe->wait, &wait,
+ TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
+ if (pipe->inode)
+ mutex_unlock(&pipe->inode->i_mutex);
schedule();
- finish_wait(PIPE_WAIT(*inode), &wait);
- mutex_lock(PIPE_MUTEX(*inode));
+ finish_wait(&pipe->wait, &wait);
+ if (pipe->inode)
+ mutex_lock(&pipe->inode->i_mutex);
}
static int
@@ -90,32 +94,56 @@ pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len)
return 0;
}
-static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buffer *buf)
+static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
- if (info->tmp_page) {
- __free_page(page);
- return;
- }
- info->tmp_page = page;
+ buf->flags &= ~PIPE_BUF_FLAG_STOLEN;
+
+ /*
+ * If nobody else uses this page, and we don't already have a
+ * temporary page, let's keep track of it as a one-deep
+ * allocation cache. (Otherwise just release our reference to it)
+ */
+ if (page_count(page) == 1 && !pipe->tmp_page)
+ pipe->tmp_page = page;
+ else
+ page_cache_release(page);
}
-static void *anon_pipe_buf_map(struct file *file, struct pipe_inode_info *info, struct pipe_buffer *buf)
+static void * anon_pipe_buf_map(struct file *file, struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
return kmap(buf->page);
}
-static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer *buf)
+static void anon_pipe_buf_unmap(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
kunmap(buf->page);
}
+static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ buf->flags |= PIPE_BUF_FLAG_STOLEN;
+ return 0;
+}
+
+static void anon_pipe_buf_get(struct pipe_inode_info *info,
+ struct pipe_buffer *buf)
+{
+ page_cache_get(buf->page);
+}
+
static struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.map = anon_pipe_buf_map,
.unmap = anon_pipe_buf_unmap,
.release = anon_pipe_buf_release,
+ .steal = anon_pipe_buf_steal,
+ .get = anon_pipe_buf_get,
};
static ssize_t
@@ -123,7 +151,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
unsigned long nr_segs, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
- struct pipe_inode_info *info;
+ struct pipe_inode_info *pipe;
int do_wakeup;
ssize_t ret;
struct iovec *iov = (struct iovec *)_iov;
@@ -136,13 +164,13 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- mutex_lock(PIPE_MUTEX(*inode));
- info = inode->i_pipe;
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
for (;;) {
- int bufs = info->nrbufs;
+ int bufs = pipe->nrbufs;
if (bufs) {
- int curbuf = info->curbuf;
- struct pipe_buffer *buf = info->bufs + curbuf;
+ int curbuf = pipe->curbuf;
+ struct pipe_buffer *buf = pipe->bufs + curbuf;
struct pipe_buf_operations *ops = buf->ops;
void *addr;
size_t chars = buf->len;
@@ -151,11 +179,17 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
if (chars > total_len)
chars = total_len;
- addr = ops->map(filp, info, buf);
+ addr = ops->map(filp, pipe, buf);
+ if (IS_ERR(addr)) {
+ if (!ret)
+ ret = PTR_ERR(addr);
+ break;
+ }
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
- ops->unmap(info, buf);
+ ops->unmap(pipe, buf);
if (unlikely(error)) {
- if (!ret) ret = -EFAULT;
+ if (!ret)
+ ret = -EFAULT;
break;
}
ret += chars;
@@ -163,10 +197,10 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
buf->len -= chars;
if (!buf->len) {
buf->ops = NULL;
- ops->release(info, buf);
+ ops->release(pipe, buf);
curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
- info->curbuf = curbuf;
- info->nrbufs = --bufs;
+ pipe->curbuf = curbuf;
+ pipe->nrbufs = --bufs;
do_wakeup = 1;
}
total_len -= chars;
@@ -175,9 +209,9 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
if (bufs) /* More to do? */
continue;
- if (!PIPE_WRITERS(*inode))
+ if (!pipe->writers)
break;
- if (!PIPE_WAITING_WRITERS(*inode)) {
+ if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
* if O_NONBLOCK is set, or if we got some data.
* But if a writer sleeps in kernel space, then
@@ -191,20 +225,22 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
}
if (signal_pending(current)) {
- if (!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync(PIPE_WAIT(*inode));
- kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
- pipe_wait(inode);
+ pipe_wait(pipe);
}
- mutex_unlock(PIPE_MUTEX(*inode));
- /* Signal writers asynchronously that there is more room. */
+ mutex_unlock(&inode->i_mutex);
+
+ /* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible(PIPE_WAIT(*inode));
- kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+ wake_up_interruptible(&pipe->wait);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
file_accessed(filp);
@@ -215,6 +251,7 @@ static ssize_t
pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
struct iovec iov = { .iov_base = buf, .iov_len = count };
+
return pipe_readv(filp, &iov, 1, ppos);
}
@@ -223,7 +260,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
unsigned long nr_segs, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
- struct pipe_inode_info *info;
+ struct pipe_inode_info *pipe;
ssize_t ret;
int do_wakeup;
struct iovec *iov = (struct iovec *)_iov;
@@ -237,10 +274,10 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- mutex_lock(PIPE_MUTEX(*inode));
- info = inode->i_pipe;
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
- if (!PIPE_READERS(*inode)) {
+ if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
@@ -248,15 +285,25 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
- if (info->nrbufs && chars != 0) {
- int lastbuf = (info->curbuf + info->nrbufs - 1) & (PIPE_BUFFERS-1);
- struct pipe_buffer *buf = info->bufs + lastbuf;
+ if (pipe->nrbufs && chars != 0) {
+ int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
+ (PIPE_BUFFERS-1);
+ struct pipe_buffer *buf = pipe->bufs + lastbuf;
struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
+
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
- void *addr = ops->map(filp, info, buf);
- int error = pipe_iov_copy_from_user(offset + addr, iov, chars);
- ops->unmap(info, buf);
+ void *addr;
+ int error;
+
+ addr = ops->map(filp, pipe, buf);
+ if (IS_ERR(addr)) {
+ error = PTR_ERR(addr);
+ goto out;
+ }
+ error = pipe_iov_copy_from_user(offset + addr, iov,
+ chars);
+ ops->unmap(pipe, buf);
ret = error;
do_wakeup = 1;
if (error)
@@ -271,16 +318,18 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
for (;;) {
int bufs;
- if (!PIPE_READERS(*inode)) {
+
+ if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
- if (!ret) ret = -EPIPE;
+ if (!ret)
+ ret = -EPIPE;
break;
}
- bufs = info->nrbufs;
+ bufs = pipe->nrbufs;
if (bufs < PIPE_BUFFERS) {
- int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS-1);
- struct pipe_buffer *buf = info->bufs + newbuf;
- struct page *page = info->tmp_page;
+ int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
+ struct pipe_buffer *buf = pipe->bufs + newbuf;
+ struct page *page = pipe->tmp_page;
int error;
if (!page) {
@@ -289,9 +338,9 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
ret = ret ? : -ENOMEM;
break;
}
- info->tmp_page = page;
+ pipe->tmp_page = page;
}
- /* Always wakeup, even if the copy fails. Otherwise
+ /* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
@@ -304,7 +353,8 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
error = pipe_iov_copy_from_user(kmap(page), iov, chars);
kunmap(page);
if (unlikely(error)) {
- if (!ret) ret = -EFAULT;
+ if (!ret)
+ ret = -EFAULT;
break;
}
ret += chars;
@@ -314,8 +364,8 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = chars;
- info->nrbufs = ++bufs;
- info->tmp_page = NULL;
+ pipe->nrbufs = ++bufs;
+ pipe->tmp_page = NULL;
total_len -= chars;
if (!total_len)
@@ -324,27 +374,29 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
if (bufs < PIPE_BUFFERS)
continue;
if (filp->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
+ if (!ret)
+ ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
- if (!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync(PIPE_WAIT(*inode));
- kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
- PIPE_WAITING_WRITERS(*inode)++;
- pipe_wait(inode);
- PIPE_WAITING_WRITERS(*inode)--;
+ pipe->waiting_writers++;
+ pipe_wait(pipe);
+ pipe->waiting_writers--;
}
out:
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_unlock(&inode->i_mutex);
if (do_wakeup) {
- wake_up_interruptible(PIPE_WAIT(*inode));
- kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
+ wake_up_interruptible(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0)
file_update_time(filp);
@@ -356,6 +408,7 @@ pipe_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+
return pipe_writev(filp, &iov, 1, ppos);
}
@@ -366,7 +419,8 @@ bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
}
static ssize_t
-bad_pipe_w(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
{
return -EBADF;
}
@@ -376,21 +430,22 @@ pipe_ioctl(struct inode *pino, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
- struct pipe_inode_info *info;
+ struct pipe_inode_info *pipe;
int count, buf, nrbufs;
switch (cmd) {
case FIONREAD:
- mutex_lock(PIPE_MUTEX(*inode));
- info = inode->i_pipe;
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
count = 0;
- buf = info->curbuf;
- nrbufs = info->nrbufs;
+ buf = pipe->curbuf;
+ nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
- count += info->bufs[buf].len;
+ count += pipe->bufs[buf].len;
buf = (buf+1) & (PIPE_BUFFERS-1);
}
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_unlock(&inode->i_mutex);
+
return put_user(count, (int __user *)arg);
default:
return -EINVAL;
@@ -403,17 +458,17 @@ pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
struct inode *inode = filp->f_dentry->d_inode;
- struct pipe_inode_info *info = inode->i_pipe;
+ struct pipe_inode_info *pipe = inode->i_pipe;
int nrbufs;
- poll_wait(filp, PIPE_WAIT(*inode), wait);
+ poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
- nrbufs = info->nrbufs;
+ nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
- if (!PIPE_WRITERS(*inode) && filp->f_version != PIPE_WCOUNTER(*inode))
+ if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= POLLHUP;
}
@@ -423,7 +478,7 @@ pipe_poll(struct file *filp, poll_table *wait)
* Most Unices do not set POLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
- if (!PIPE_READERS(*inode))
+ if (!pipe->readers)
mask |= POLLERR;
}
@@ -433,17 +488,21 @@ pipe_poll(struct file *filp, poll_table *wait)
static int
pipe_release(struct inode *inode, int decr, int decw)
{
- mutex_lock(PIPE_MUTEX(*inode));
- PIPE_READERS(*inode) -= decr;
- PIPE_WRITERS(*inode) -= decw;
- if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
+ struct pipe_inode_info *pipe;
+
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+ pipe->readers -= decr;
+ pipe->writers -= decw;
+
+ if (!pipe->readers && !pipe->writers) {
free_pipe_info(inode);
} else {
- wake_up_interruptible(PIPE_WAIT(*inode));
- kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
- kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
+ wake_up_interruptible(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -454,9 +513,9 @@ pipe_read_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- mutex_lock(PIPE_MUTEX(*inode));
- retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
+ mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@@ -471,9 +530,9 @@ pipe_write_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- mutex_lock(PIPE_MUTEX(*inode));
- retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
+ mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@@ -486,16 +545,17 @@ static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = filp->f_dentry->d_inode;
+ struct pipe_inode_info *pipe = inode->i_pipe;
int retval;
- mutex_lock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
- retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
+ retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if (retval >= 0)
- retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
+ retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
@@ -534,9 +594,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
{
/* We could have perhaps used atomic_t, but this and friends
below are the only places. So it doesn't seem worthwhile. */
- mutex_lock(PIPE_MUTEX(*inode));
- PIPE_READERS(*inode)++;
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
+ inode->i_pipe->readers++;
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -544,9 +604,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
- mutex_lock(PIPE_MUTEX(*inode));
- PIPE_WRITERS(*inode)++;
- mutex_unlock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
+ inode->i_pipe->writers++;
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -554,12 +614,12 @@ pipe_write_open(struct inode *inode, struct file *filp)
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
- mutex_lock(PIPE_MUTEX(*inode));
+ mutex_lock(&inode->i_mutex);
if (filp->f_mode & FMODE_READ)
- PIPE_READERS(*inode)++;
+ inode->i_pipe->readers++;
if (filp->f_mode & FMODE_WRITE)
- PIPE_WRITERS(*inode)++;
- mutex_unlock(PIPE_MUTEX(*inode));
+ inode->i_pipe->writers++;
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -642,37 +702,38 @@ static struct file_operations rdwr_pipe_fops = {
.fasync = pipe_rdwr_fasync,
};
-void free_pipe_info(struct inode *inode)
+struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
+{
+ struct pipe_inode_info *pipe;
+
+ pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
+ if (pipe) {
+ init_waitqueue_head(&pipe->wait);
+ pipe->r_counter = pipe->w_counter = 1;
+ pipe->inode = inode;
+ }
+
+ return pipe;
+}
+
+void __free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
- struct pipe_inode_info *info = inode->i_pipe;
- inode->i_pipe = NULL;
for (i = 0; i < PIPE_BUFFERS; i++) {
- struct pipe_buffer *buf = info->bufs + i;
+ struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
- buf->ops->release(info, buf);
+ buf->ops->release(pipe, buf);
}
- if (info->tmp_page)
- __free_page(info->tmp_page);
- kfree(info);
+ if (pipe->tmp_page)
+ __free_page(pipe->tmp_page);
+ kfree(pipe);
}
-struct inode* pipe_new(struct inode* inode)
+void free_pipe_info(struct inode *inode)
{
- struct pipe_inode_info *info;
-
- info = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
- if (!info)
- goto fail_page;
- inode->i_pipe = info;
-
- init_waitqueue_head(PIPE_WAIT(*inode));
- PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;
-
- return inode;
-fail_page:
- return NULL;
+ __free_pipe_info(inode->i_pipe);
+ inode->i_pipe = NULL;
}
static struct vfsmount *pipe_mnt __read_mostly;
@@ -680,6 +741,7 @@ static int pipefs_delete_dentry(struct dentry *dentry)
{
return 1;
}
+
static struct dentry_operations pipefs_dentry_operations = {
.d_delete = pipefs_delete_dentry,
};
@@ -687,13 +749,17 @@ static struct dentry_operations pipefs_dentry_operations = {
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode(pipe_mnt->mnt_sb);
+ struct pipe_inode_info *pipe;
if (!inode)
goto fail_inode;
- if(!pipe_new(inode))
+ pipe = alloc_pipe_info(inode);
+ if (!pipe)
goto fail_iput;
- PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
+ inode->i_pipe = pipe;
+
+ pipe->readers = pipe->writers = 1;
inode->i_fop = &rdwr_pipe_fops;
/*
@@ -708,10 +774,12 @@ static struct inode * get_pipe_inode(void)
inode->i_gid = current->fsgid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_blksize = PAGE_SIZE;
+
return inode;
fail_iput:
iput(inode);
+
fail_inode:
return NULL;
}
@@ -724,7 +792,7 @@ int do_pipe(int *fd)
struct inode * inode;
struct file *f1, *f2;
int error;
- int i,j;
+ int i, j;
error = -ENFILE;
f1 = get_empty_filp();
@@ -757,6 +825,7 @@ int do_pipe(int *fd)
dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
if (!dentry)
goto close_f12_inode_i_j;
+
dentry->d_op = &pipefs_dentry_operations;
d_add(dentry, inode);
f1->f_vfsmnt = f2->f_vfsmnt = mntget(mntget(pipe_mnt));
@@ -780,6 +849,7 @@ int do_pipe(int *fd)
fd_install(j, f2);
fd[0] = i;
fd[1] = j;
+
return 0;
close_f12_inode_i_j:
@@ -804,8 +874,9 @@ no_files:
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
-static struct super_block *pipefs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static struct super_block *
+pipefs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
{
return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
}
@@ -819,6 +890,7 @@ static struct file_system_type pipe_fs_type = {
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
+
if (!err) {
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {
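
For context (not part of the patch), a minimal userspace sketch of the FIONREAD query serviced by the reworked pipe_ioctl() above, which walks pipe->bufs[] and sums the .len of each occupied buffer; only standard POSIX/Linux calls are assumed:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

/*
 * Write a few bytes into a pipe, then ask the kernel how many bytes
 * are queued. The FIONREAD case in pipe_ioctl() computes this count
 * by iterating over the pipe's buffers under inode->i_mutex.
 */
int main(void)
{
	int fds[2];
	int queued = 0;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	write(fds[1], "hello", 5);

	if (ioctl(fds[0], FIONREAD, &queued) < 0) {
		perror("ioctl");
		return 1;
	}

	printf("bytes queued in pipe: %d\n", queued);	/* expect 5 */
	return 0;
}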