Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c            |   4
-rw-r--r--  fs/block_dev.c      |   5
-rw-r--r--  fs/cifs/cifsfs.c    |   1
-rw-r--r--  fs/cifs/cifssmb.c   |  34
-rw-r--r--  fs/cifs/file.c      |  28
-rw-r--r--  fs/fs-writeback.c   | 133
-rw-r--r--  fs/fscache/stats.c  |   4
-rw-r--r--  fs/nfs/nfs4proc.c   |   3
-rw-r--r--  fs/nilfs2/alloc.c   |   2
-rw-r--r--  fs/nilfs2/btree.c   |   2
-rw-r--r--  fs/nilfs2/ioctl.c   |   2
-rw-r--r--  fs/proc/task_mmu.c  |  27
12 files changed, 150 insertions(+), 95 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index e1f9221..e7bf6ca 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -554,7 +554,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
.bi_rw = bio->bi_rw,
};
- if (q->merge_bvec_fn(q, &bvm, prev) < len) {
+ if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
prev->bv_len -= len;
return 0;
}
@@ -607,7 +607,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
- if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
+ if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
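Note: the fix above compares the driver's merge_bvec_fn() return value against the full length of the merged segment rather than only the newly added bytes. Since prev->bv_len has already been grown by len at that point, the old "< len" check could accept a segment larger than the driver said it can take. A minimal, runnable userspace sketch of the corrected rollback logic; the bvec struct and merge_fn() are simplified stand-ins, not the kernel API:

#include <stdio.h>

struct bvec { unsigned int bv_len; };

/* Stand-in for q->merge_bvec_fn(): the driver accepts at most 4096
 * bytes at this offset. */
static unsigned int merge_fn(const struct bvec *bv)
{
	return 4096;
}

int main(void)
{
	struct bvec prev = { .bv_len = 3072 };
	unsigned int len = 2048;

	prev.bv_len += len;	/* speculative merge: segment is now 5120 bytes */

	/* The fix: compare against the merged segment size (bv_len), not
	 * just the added bytes (len).  With the old "< len" check,
	 * 4096 < 2048 is false and the oversized segment slips through. */
	if (merge_fn(&prev) < prev.bv_len) {
		prev.bv_len -= len;	/* roll the merge back */
		printf("merge rejected, bv_len restored to %u\n", prev.bv_len);
	} else {
		printf("merge accepted, bv_len = %u\n", prev.bv_len);
	}
	return 0;
}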
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d11d028..2a6d019 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
* NULL first argument is nfsd_sync_dir() and that's not a directory.
*/
-static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
struct block_device *bdev = I_BDEV(filp->f_mapping->host);
int error;
@@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
error = 0;
return error;
}
+EXPORT_SYMBOL(blkdev_fsync);
/*
* pseudo-fs
@@ -1481,7 +1482,7 @@ const struct file_operations def_blk_fops = {
.aio_read = generic_file_aio_read,
.aio_write = blkdev_aio_write,
.mmap = generic_file_mmap,
- .fsync = block_fsync,
+ .fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
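Note: renaming block_fsync() to blkdev_fsync() and exporting it lets other kernel code reuse the block-device fsync implementation instead of duplicating it. A hypothetical fragment of such a reuse; example_blk_fops is illustrative only, and the extern declaration assumes the full patch also adds one to a shared header:

#include <linux/fs.h>
#include <linux/module.h>

/* blkdev_fsync() is now exported by fs/block_dev.c; with a declaration
 * in scope it can be wired straight into another driver's fops. */
extern int blkdev_fsync(struct file *filp, struct dentry *dentry,
			int datasync);

static const struct file_operations example_blk_fops = {
	.owner = THIS_MODULE,
	.fsync = blkdev_fsync,	/* reuse the exported implementation */
};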
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5183bc2..ded66be 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -808,6 +808,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_fsync,
.flush = cifs_flush,
+ .mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3f4fbd6..5d3f29f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1431,6 +1431,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
__u32 bytes_sent;
__u16 byte_count;
+ *nbytes = 0;
+
/* cFYI(1, ("write at %lld %d bytes", offset, count));*/
if (tcon->ses == NULL)
return -ECONNABORTED;
@@ -1513,11 +1515,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
cifs_stats_inc(&tcon->num_writes);
if (rc) {
cFYI(1, ("Send error in write = %d", rc));
- *nbytes = 0;
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off high 16 bits when bytes written as returned by the
+ * server is greater than bytes requested by the client. Some
+ * OS/2 servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
cifs_buf_release(pSMB);
@@ -1606,6 +1615,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
+
+ /*
+ * Mask off high 16 bits when bytes written as returned by the
+ * server is greater than bytes requested by the client. OS/2
+ * servers are known to set incorrect CountHigh values.
+ */
+ if (*nbytes > count)
+ *nbytes &= 0xFFFF;
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
@@ -1794,8 +1811,21 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
}
parm_data = (struct cifs_posix_lock *)
((char *)&pSMBr->hdr.Protocol + data_offset);
- if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
+ if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
pLockData->fl_type = F_UNLCK;
+ else {
+ if (parm_data->lock_type ==
+ __constant_cpu_to_le16(CIFS_RDLCK))
+ pLockData->fl_type = F_RDLCK;
+ else if (parm_data->lock_type ==
+ __constant_cpu_to_le16(CIFS_WRLCK))
+ pLockData->fl_type = F_WRLCK;
+
+ pLockData->fl_start = parm_data->start;
+ pLockData->fl_end = parm_data->start +
+ parm_data->length - 1;
+ pLockData->fl_pid = parm_data->pid;
+ }
}
plk_err_exit:
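Note: both write paths above rebuild the 32-bit byte count from the SMB reply's 16-bit Count/CountHigh halves, then distrust CountHigh whenever the total exceeds what the client asked for. A runnable userspace sketch of that arithmetic; smb_write_count() is an illustrative helper, not a CIFS function:

#include <stdio.h>
#include <stdint.h>

static uint32_t smb_write_count(uint16_t count_high, uint16_t count_low,
				uint32_t requested)
{
	uint32_t nbytes = ((uint32_t)count_high << 16) + count_low;

	/* Some OS/2 servers return garbage in CountHigh; if the total
	 * claims more than was requested, trust only the low 16 bits. */
	if (nbytes > requested)
		nbytes &= 0xFFFF;
	return nbytes;
}

int main(void)
{
	/* A buggy server sets CountHigh to 1 on a 512-byte write. */
	printf("%u\n", smb_write_count(1, 512, 512));	/* prints 512 */
	return 0;
}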
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 058b390..9b11a8f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -839,8 +839,32 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
} else {
/* if rc == ERR_SHARING_VIOLATION ? */
- rc = 0; /* do not change lock type to unlock
- since range in use */
+ rc = 0;
+
+ if (lockType & LOCKING_ANDX_SHARED_LOCK) {
+ pfLock->fl_type = F_WRLCK;
+ } else {
+ rc = CIFSSMBLock(xid, tcon, netfid, length,
+ pfLock->fl_start, 0, 1,
+ lockType | LOCKING_ANDX_SHARED_LOCK,
+ 0 /* wait flag */);
+ if (rc == 0) {
+ rc = CIFSSMBLock(xid, tcon, netfid,
+ length, pfLock->fl_start, 1, 0,
+ lockType |
+ LOCKING_ANDX_SHARED_LOCK,
+ 0 /* wait flag */);
+ pfLock->fl_type = F_RDLCK;
+ if (rc != 0)
+ cERROR(1, ("Error unlocking "
+ "previously locked range %d "
+ "during test of lock", rc));
+ rc = 0;
+ } else {
+ pfLock->fl_type = F_WRLCK;
+ rc = 0;
+ }
+ }
}
FreeXid(xid);
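Note: the new branch implements a posix test-lock fallback over SMB. When the exclusive probe fails, a shared probe distinguishes a read-locked range (report F_RDLCK and undo the probe lock) from a fully blocked one (report F_WRLCK). A simplified, runnable sketch of that decision tree; smb_lock() is a stub standing in for CIFSSMBLock() and just simulates a server holding a read lock:

#include <stdio.h>
#include <fcntl.h>

/* Stub: unlocks succeed, a shared probe succeeds, an exclusive one
 * fails, as it would against a range held by readers. */
static int smb_lock(int shared, int unlock)
{
	if (unlock)
		return 0;
	return shared ? 0 : -13;	/* -EACCES for the exclusive probe */
}

int main(void)
{
	short fl_type;

	if (smb_lock(0, 0) == 0) {		/* exclusive probe */
		smb_lock(0, 1);			/* range was free: undo */
		fl_type = F_UNLCK;
	} else if (smb_lock(1, 0) == 0) {	/* shared probe */
		smb_lock(1, 1);			/* undo the probe lock */
		fl_type = F_RDLCK;	/* readers hold it, writers blocked */
	} else {
		fl_type = F_WRLCK;	/* even shared access is blocked */
	}
	printf("fl_type = %d\n", fl_type);
	return 0;
}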
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 781a322..4b37f7c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -554,108 +554,85 @@ select_queue:
return ret;
}
-static void unpin_sb_for_writeback(struct super_block **psb)
+static void unpin_sb_for_writeback(struct super_block *sb)
{
- struct super_block *sb = *psb;
-
- if (sb) {
- up_read(&sb->s_umount);
- put_super(sb);
- *psb = NULL;
- }
+ up_read(&sb->s_umount);
+ put_super(sb);
}
+enum sb_pin_state {
+ SB_PINNED,
+ SB_NOT_PINNED,
+ SB_PIN_FAILED
+};
+
/*
* For WB_SYNC_NONE writeback, the caller does not have the sb pinned
* before calling writeback. So make sure that we do pin it, so it doesn't
* go away while we are writing inodes from it.
- *
- * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
- * 1 if we failed.
*/
-static int pin_sb_for_writeback(struct writeback_control *wbc,
- struct inode *inode, struct super_block **psb)
+static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
+ struct super_block *sb)
{
- struct super_block *sb = inode->i_sb;
-
- /*
- * If this sb is already pinned, nothing more to do. If not and
- * *psb is non-NULL, unpin the old one first
- */
- if (sb == *psb)
- return 0;
- else if (*psb)
- unpin_sb_for_writeback(psb);
-
/*
* Caller must already hold the ref for this
*/
if (wbc->sync_mode == WB_SYNC_ALL) {
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- return 0;
+ return SB_NOT_PINNED;
}
-
spin_lock(&sb_lock);
sb->s_count++;
if (down_read_trylock(&sb->s_umount)) {
if (sb->s_root) {
spin_unlock(&sb_lock);
- goto pinned;
+ return SB_PINNED;
}
/*
* umounted, drop rwsem again and fall through to failure
*/
up_read(&sb->s_umount);
}
-
sb->s_count--;
spin_unlock(&sb_lock);
- return 1;
-pinned:
- *psb = sb;
- return 0;
+ return SB_PIN_FAILED;
}
-static void writeback_inodes_wb(struct bdi_writeback *wb,
- struct writeback_control *wbc)
+/*
+ * Write a portion of b_io inodes which belong to @sb.
+ * If @wbc->sb != NULL, then find and write all such
+ * inodes. Otherwise write only ones which go sequentially
+ * in reverse order.
+ * Return 1, if the caller writeback routine should be
+ * interrupted. Otherwise return 0.
+ */
+static int writeback_sb_inodes(struct super_block *sb,
+ struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
- struct super_block *sb = wbc->sb, *pin_sb = NULL;
- const unsigned long start = jiffies; /* livelock avoidance */
-
- spin_lock(&inode_lock);
-
- if (!wbc->for_kupdate || list_empty(&wb->b_io))
- queue_io(wb, wbc->older_than_this);
-
while (!list_empty(&wb->b_io)) {
- struct inode *inode = list_entry(wb->b_io.prev,
- struct inode, i_list);
long pages_skipped;
-
- /*
- * super block given and doesn't match, skip this inode
- */
- if (sb && sb != inode->i_sb) {
+ struct inode *inode = list_entry(wb->b_io.prev,
+ struct inode, i_list);
+ if (wbc->sb && sb != inode->i_sb) {
+ /* super block given and doesn't
+ match, skip this inode */
redirty_tail(inode);
continue;
}
-
+ if (sb != inode->i_sb)
+ /* finish with this superblock */
+ return 0;
if (inode->i_state & (I_NEW | I_WILL_FREE)) {
requeue_io(inode);
continue;
}
-
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
*/
- if (inode_dirtied_after(inode, start))
- break;
-
- if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
- requeue_io(inode);
- continue;
- }
+ if (inode_dirtied_after(inode, wbc->wb_start))
+ return 1;
BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
__iget(inode);
@@ -674,14 +651,50 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
spin_lock(&inode_lock);
if (wbc->nr_to_write <= 0) {
wbc->more_io = 1;
- break;
+ return 1;
}
if (!list_empty(&wb->b_more_io))
wbc->more_io = 1;
}
+ /* b_io is empty */
+ return 1;
+}
+
+static void writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ int ret = 0;
- unpin_sb_for_writeback(&pin_sb);
+ wbc->wb_start = jiffies; /* livelock avoidance */
+ spin_lock(&inode_lock);
+ if (!wbc->for_kupdate || list_empty(&wb->b_io))
+ queue_io(wb, wbc->older_than_this);
+
+ while (!list_empty(&wb->b_io)) {
+ struct inode *inode = list_entry(wb->b_io.prev,
+ struct inode, i_list);
+ struct super_block *sb = inode->i_sb;
+ enum sb_pin_state state;
+
+ if (wbc->sb && sb != wbc->sb) {
+ /* super block given and doesn't
+ match, skip this inode */
+ redirty_tail(inode);
+ continue;
+ }
+ state = pin_sb_for_writeback(wbc, sb);
+
+ if (state == SB_PIN_FAILED) {
+ requeue_io(inode);
+ continue;
+ }
+ ret = writeback_sb_inodes(sb, wb, wbc);
+ if (state == SB_PINNED)
+ unpin_sb_for_writeback(sb);
+ if (ret)
+ break;
+ }
spin_unlock(&inode_lock);
/* Leave any unwritten inodes on b_io */
}
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 46435f3a..4765190 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -165,8 +165,8 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_object_lookups),
atomic_read(&fscache_n_object_lookups_negative),
atomic_read(&fscache_n_object_lookups_positive),
- atomic_read(&fscache_n_object_lookups_timed_out),
- atomic_read(&fscache_n_object_created));
+ atomic_read(&fscache_n_object_created),
+ atomic_read(&fscache_n_object_lookups_timed_out));
seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
atomic_read(&fscache_n_updates),
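Note: the fscache fix swaps two seq_printf() arguments so the created and timed-out counters line up with their labels in the format string (crt= before tmo= in the Lookups line); with the old order each value printed under the other's label. A trivial userspace illustration of the failure mode:

#include <stdio.h>

int main(void)
{
	unsigned crt = 42, tmo = 7;

	printf("crt=%u tmo=%u\n", tmo, crt);	/* bug: values swapped */
	printf("crt=%u tmo=%u\n", crt, tmo);	/* fix: order matches labels */
	return 0;
}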
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d79a7b3..fe0cd9e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2068,8 +2068,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
case -EDQUOT:
case -ENOSPC:
case -EROFS:
- lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
- return 1;
+ return PTR_ERR(state);
default:
goto out_drop;
}
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 8d6356a..7cfb87e 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -426,7 +426,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
group_offset, bitmap))
- printk(KERN_WARNING "%s: entry numer %llu already freed\n",
+ printk(KERN_WARNING "%s: entry number %llu already freed\n",
__func__, (unsigned long long)req->pr_entry_nr);
nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7cdd98b..76c38e3 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1879,7 +1879,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
struct nilfs_btree_path *path,
int level, struct buffer_head *bh)
{
- int maxlevel, ret;
+ int maxlevel = 0, ret;
struct nilfs_btree_node *parent;
struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
__u64 ptr;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index c2ff1b3..f90a33d 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -649,7 +649,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
- void __user *argp = (void * __user *)arg;
+ void __user *argp = (void __user *)arg;
switch (cmd) {
case NILFS_IOCTL_CHANGE_CPMODE:
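Note: ioctl arguments arrive as an unsigned long, and recovering the user pointer takes a single (void __user *) cast; the old (void * __user *) spelling was a malformed pointer-to-pointer annotation. A userspace analog of the corrected cast, with demo_ioctl() as an illustrative stand-in:

#include <stdio.h>

static void demo_ioctl(unsigned long arg)
{
	void *argp = (void *)arg;	/* mirrors (void __user *)arg */

	printf("%d\n", *(int *)argp);
}

int main(void)
{
	int x = 123;

	demo_ioctl((unsigned long)&x);	/* ioctl-style integer-carried pointer */
	return 0;
}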
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a05a669..0705534 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -662,31 +662,18 @@ static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
return pme;
}
-static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
+/* This function walks within one hugetlb entry in the single call */
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
{
- struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
- struct hstate *hs = NULL;
int err = 0;
+ u64 pfn;
- vma = find_vma(walk->mm, addr);
- if (vma)
- hs = hstate_vma(vma);
for (; addr != end; addr += PAGE_SIZE) {
- u64 pfn = PM_NOT_PRESENT;
-
- if (vma && (addr >= vma->vm_end)) {
- vma = find_vma(walk->mm, addr);
- if (vma)
- hs = hstate_vma(vma);
- }
-
- if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
- /* calculate pfn of the "raw" page in the hugepage. */
- int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
- pfn = huge_pte_to_pagemap_entry(*pte, offset);
- }
+ int offset = (addr & ~hmask) >> PAGE_SHIFT;
+ pfn = huge_pte_to_pagemap_entry(*pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
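Note: with the page walker now passing the huge-page mask (hmask) in directly, the per-page offset inside a huge page reduces to (addr & ~hmask) >> PAGE_SHIFT, eliminating the repeated find_vma()/hstate_vma() lookups of the old code. A runnable userspace sketch of that index math, assuming 2 MiB huge pages over 4 KiB base pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_SIZE	(2UL << 20)	/* assumed 2 MiB huge pages */

int main(void)
{
	unsigned long hmask = ~(HPAGE_SIZE - 1);	/* huge-page mask */
	unsigned long addr = 0x40000000UL + 5 * 4096;	/* 6th base page */

	/* Index of the 4 KiB page within its huge page, as in the patch: */
	unsigned long offset = (addr & ~hmask) >> PAGE_SHIFT;

	printf("offset = %lu\n", offset);	/* prints 5 */
	return 0;
}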