Diffstat (limited to 'fs')
-rw-r--r--  fs/binfmt_elf.c               |   2
-rw-r--r--  fs/cifs/cifspdu.h             |   2
-rw-r--r--  fs/compat.c                   | 100
-rw-r--r--  fs/configfs/dir.c             |  27
-rw-r--r--  fs/ecryptfs/dentry.c          |  15
-rw-r--r--  fs/ecryptfs/inode.c           |   2
-rw-r--r--  fs/hostfs/hostfs_kern.c       |  17
-rw-r--r--  fs/jffs2/background.c         |   8
-rw-r--r--  fs/jffs2/readinode.c          |  16
-rw-r--r--  fs/jffs2/scan.c               |   9
-rw-r--r--  fs/jffs2/wbuf.c               |   7
-rw-r--r--  fs/nfs/inode.c                |   3
-rw-r--r--  fs/nfs/super.c                |   4
-rw-r--r--  fs/nfs/sysctl.c               |   8
-rw-r--r--  fs/nfs/write.c                | 116
-rw-r--r--  fs/nfsd/nfsfh.c               |   1
-rw-r--r--  fs/ocfs2/aops.c               |  26
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c  |  50
-rw-r--r--  fs/ocfs2/cluster/heartbeat.h  |   2
-rw-r--r--  fs/ocfs2/cluster/tcp.c        |  13
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c      |   3
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c      |   2
-rw-r--r--  fs/ocfs2/heartbeat.c          |  15
-rw-r--r--  fs/partitions/Kconfig         |   3
-rw-r--r--  fs/partitions/check.c         |   4
-rw-r--r--  fs/proc/base.c                |  21
-rw-r--r--  fs/smbfs/request.c            |   1
-rw-r--r--  fs/sysfs/file.c               |  64
-rw-r--r--  fs/sysfs/inode.c              |  10
-rw-r--r--  fs/ufs/balloc.c               |  86
-rw-r--r--  fs/ufs/ialloc.c               |   5
-rw-r--r--  fs/ufs/inode.c                |  36
-rw-r--r--  fs/ufs/truncate.c             |  38
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c    |   4
34 files changed, 506 insertions(+), 214 deletions(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 51db118..a2fceba 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -507,7 +507,7 @@ out:
#define INTERPRETER_ELF 2
#ifndef STACK_RND_MASK
-#define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
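A note on the STACK_RND_MASK change: shifting the mask right by (PAGE_SHIFT - 12) scales it down as the page size grows, so the randomized stack area stays at 8MB of virtual address space on every configuration. A minimal userspace sketch of that arithmetic, assuming only standard C (the PAGE_SHIFT values are supplied by hand for illustration):

#include <stdio.h>

/* Mirrors the patched macro: 0x7ff >> (PAGE_SHIFT - 12). */
static unsigned long stack_rnd_mask(unsigned int page_shift)
{
	return 0x7ffUL >> (page_shift - 12);
}

int main(void)
{
	unsigned int shifts[] = { 12, 14, 16 };	/* 4K, 16K and 64K pages */

	for (int i = 0; i < 3; i++) {
		unsigned int s = shifts[i];
		unsigned long span = (stack_rnd_mask(s) + 1) << s;

		printf("PAGE_SHIFT=%u mask=0x%lx randomized span=%lu MB\n",
		       s, stack_rnd_mask(s), span >> 20);
	}
	return 0;	/* prints an 8 MB span for each page size */
}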
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index e894545..4d8948e 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -220,7 +220,7 @@
*/
#define CIFS_NO_HANDLE 0xFFFF
-#define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL
+#define NO_CHANGE_64 cpu_to_le64(0xFFFFFFFFFFFFFFFFULL)
#define NO_CHANGE_32 0xFFFFFFFFUL
/* IPC$ in ASCII */
diff --git a/fs/compat.c b/fs/compat.c
index 0ec70e3..040a8be 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -48,6 +48,7 @@
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/mm.h>
+#include <linux/eventpoll.h>
#include <net/sock.h> /* siocdevprivate_ioctl */
@@ -2235,3 +2236,102 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
return sys_ni_syscall();
}
#endif
+
+#ifdef CONFIG_EPOLL
+
+#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
+asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
+ struct compat_epoll_event __user *event)
+{
+ long err = 0;
+ struct compat_epoll_event user;
+ struct epoll_event __user *kernel = NULL;
+
+ if (event) {
+ if (copy_from_user(&user, event, sizeof(user)))
+ return -EFAULT;
+ kernel = compat_alloc_user_space(sizeof(struct epoll_event));
+ err |= __put_user(user.events, &kernel->events);
+ err |= __put_user(user.data, &kernel->data);
+ }
+
+ return err ? err : sys_epoll_ctl(epfd, op, fd, kernel);
+}
+
+
+asmlinkage long compat_sys_epoll_wait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout)
+{
+ long i, ret, err = 0;
+ struct epoll_event __user *kbuf;
+ struct epoll_event ev;
+
+ if ((maxevents <= 0) ||
+ (maxevents > (INT_MAX / sizeof(struct epoll_event))))
+ return -EINVAL;
+ kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents);
+ ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
+ for (i = 0; i < ret; i++) {
+ err |= __get_user(ev.events, &kbuf[i].events);
+ err |= __get_user(ev.data, &kbuf[i].data);
+ err |= __put_user(ev.events, &events->events);
+ err |= __put_user_unaligned(ev.data, &events->data);
+ events++;
+ }
+
+ return err ? -EFAULT: ret;
+}
+#endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */
+
+#ifdef TIF_RESTORE_SIGMASK
+asmlinkage long compat_sys_epoll_pwait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize)
+{
+ long err;
+ compat_sigset_t csigmask;
+ sigset_t ksigmask, sigsaved;
+
+ /*
+ * If the caller wants a certain signal mask to be set during the wait,
+ * we apply it here.
+ */
+ if (sigmask) {
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+ if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
+ return -EFAULT;
+ sigset_from_compat(&ksigmask, &csigmask);
+ sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+ sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+ }
+
+#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
+ err = compat_sys_epoll_wait(epfd, events, maxevents, timeout);
+#else
+ err = sys_epoll_wait(epfd, events, maxevents, timeout);
+#endif
+
+ /*
+ * If we changed the signal mask, we need to restore the original one.
+ * In case we've got a signal while waiting, we do not restore the
+ * signal mask yet, and we allow do_signal() to deliver the signal on
+ * the way back to userspace, before the signal mask is restored.
+ */
+ if (sigmask) {
+ if (err == -EINTR) {
+ memcpy(&current->saved_sigmask, &sigsaved,
+ sizeof(sigsaved));
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ } else
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ }
+
+ return err;
+}
+#endif /* TIF_RESTORE_SIGMASK */
+
+#endif /* CONFIG_EPOLL */
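The compat wrappers above exist for architectures where struct epoll_event is laid out differently in the 32-bit and 64-bit ABIs (which is what CONFIG_HAS_COMPAT_EPOLL_EVENT signals), so a 32-bit binary on a 64-bit kernel cannot hand its events array to the native syscall directly. Ordinary userspace code needs no changes; a plain epoll loop such as the hedged sketch below (standard glibc epoll API only) is what gets routed through compat_sys_epoll_ctl()/compat_sys_epoll_wait() when built as a 32-bit program:

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	struct epoll_event ev, events[8];
	int n, epfd = epoll_create(8);	/* size hint, ignored by newer kernels */

	if (epfd < 0) {
		perror("epoll_create");
		return 1;
	}
	ev.events = EPOLLIN;
	ev.data.fd = STDIN_FILENO;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
		perror("epoll_ctl");
		return 1;
	}
	/* Wait up to five seconds for stdin to become readable. */
	n = epoll_wait(epfd, events, 8, 5000);
	printf("epoll_wait returned %d ready descriptor(s)\n", n);
	close(epfd);
	return 0;
}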
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 34750d5..5e6e37e 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1141,25 +1141,22 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
err = -ENOMEM;
dentry = d_alloc(configfs_sb->s_root, &name);
- if (!dentry)
- goto out_release;
-
- d_add(dentry, NULL);
+ if (dentry) {
+ d_add(dentry, NULL);
- err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry);
- if (!err)
- dentry = NULL;
- else
- d_delete(dentry);
+ err = configfs_attach_group(sd->s_element, &group->cg_item,
+ dentry);
+ if (err) {
+ d_delete(dentry);
+ dput(dentry);
+ }
+ }
mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
- if (dentry) {
- dput(dentry);
-out_release:
- unlink_group(group);
- configfs_release_fs();
+ if (err) {
+ unlink_group(group);
+ configfs_release_fs();
}
return err;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 329efcd..cb20b96 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -78,18 +78,13 @@ struct kmem_cache *ecryptfs_dentry_info_cache;
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
- struct dentry *lower_dentry;
-
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (ecryptfs_dentry_to_private(dentry))
+ if (ecryptfs_dentry_to_private(dentry)) {
+ if (ecryptfs_dentry_to_lower(dentry)) {
+ mntput(ecryptfs_dentry_to_lower_mnt(dentry));
+ dput(ecryptfs_dentry_to_lower(dentry));
+ }
kmem_cache_free(ecryptfs_dentry_info_cache,
ecryptfs_dentry_to_private(dentry));
- if (lower_dentry) {
- struct vfsmount *lower_mnt =
- ecryptfs_dentry_to_lower_mnt(dentry);
-
- mntput(lower_mnt);
- dput(lower_dentry);
}
return;
}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e62f3fc..1548be2 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -38,7 +38,7 @@ static struct dentry *lock_parent(struct dentry *dentry)
struct dentry *dir;
dir = dget(dentry->d_parent);
- mutex_lock(&(dir->d_inode->i_mutex));
+ mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT);
return dir;
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e965eb1..9baf697 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -47,7 +47,7 @@ struct dentry_operations hostfs_dentry_ops = {
};
/* Changed in hostfs_args before the kernel starts running */
-static char *root_ino = "/";
+static char *root_ino = "";
static int append = 0;
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
@@ -947,15 +947,17 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
- if((data == NULL) || (*data == '\0'))
- data = root_ino;
+ /* NULL is printed as <NULL> by sprintf: avoid that. */
+ if (data == NULL)
+ data = "";
err = -ENOMEM;
- name = kmalloc(strlen(data) + 1, GFP_KERNEL);
+ name = kmalloc(strlen(root_ino) + 1
+ + strlen(data) + 1, GFP_KERNEL);
if(name == NULL)
goto out;
- strcpy(name, data);
+ sprintf(name, "%s/%s", root_ino, data);
root_inode = iget(sb, 0);
if(root_inode == NULL)
@@ -966,6 +968,9 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
goto out_put;
HOSTFS_I(root_inode)->host_filename = name;
+ /* Avoid the error path freeing name a second time through
+ * iput(root_inode) -> hostfs_destroy_inode. */
+ name = NULL;
err = -ENOMEM;
sb->s_root = d_alloc_root(root_inode);
@@ -977,7 +982,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
/* No iput in this case because the dput does that for us */
dput(sb->s_root);
sb->s_root = NULL;
- goto out_free;
+ goto out;
}
return(0);
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 6eb3dae..888f236 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -99,7 +99,13 @@ static int jffs2_garbage_collect_thread(void *_c)
if (try_to_freeze())
continue;
- cond_resched();
+ /* This thread is purely an optimisation. But if it runs when
+ other things could be running, it actually makes things a
+ lot worse. Use yield() and put it at the back of the runqueue
+ every time. Especially during boot, pulling an inode in
+ with read_inode() is much preferable to having the GC thread
+ get there first. */
+ yield();
/* Put_super will send a SIGKILL and then wait on the sem.
*/
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 58a0b912..717a48c 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -373,7 +373,14 @@ free_out:
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
/* We don't mark unknown nodes as REF_UNCHECKED */
- BUG_ON(ref_flags(ref) == REF_UNCHECKED);
+ if (ref_flags(ref) == REF_UNCHECKED) {
+ JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
+ ref_offset(ref));
+ JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
+ je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
+ je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
+ return 1;
+ }
un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
@@ -576,6 +583,13 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
jffs2_mark_node_obsolete(c, ref);
goto cont;
}
+ /* Due to poor choice of crc32 seed, an all-zero node will have a correct CRC */
+ if (!je32_to_cpu(node->u.hdr_crc) && !je16_to_cpu(node->u.nodetype) &&
+ !je16_to_cpu(node->u.magic) && !je32_to_cpu(node->u.totlen)) {
+ JFFS2_NOTICE("All zero node header at %#08x.\n", ref_offset(ref));
+ jffs2_mark_node_obsolete(c, ref);
+ goto cont;
+ }
switch (je16_to_cpu(node->u.nodetype)) {
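The "poor choice of crc32 seed" remark refers to JFFS2 seeding its header CRC with 0 and applying no final inversion, so a node header consisting entirely of zero bytes yields hdr_crc == 0 and passes the check. A hedged, self-contained sketch of that property using a generic bit-by-bit CRC-32 (not the kernel's table-driven crc32() helper):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* CRC-32, polynomial 0xEDB88320, caller-supplied seed, no final XOR --
 * the same convention JFFS2 uses for its node header CRCs. */
static uint32_t crc32_noinv(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t zeroes[12];	/* the size of struct jffs2_unknown_node */

	memset(zeroes, 0, sizeof(zeroes));
	/* A seed of 0 over all-zero input never changes, so the stored 0 "matches". */
	printf("crc32(0, all-zero header) = 0x%08x\n",
	       crc32_noinv(0, zeroes, sizeof(zeroes)));
	return 0;
}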
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 31c1475..7fb45bd4 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -734,6 +734,15 @@ scan_more:
ofs += 4;
continue;
}
+ /* Due to poor choice of crc32 seed, an all-zero node will have a correct CRC */
+ if (!je32_to_cpu(node->hdr_crc) && !je16_to_cpu(node->nodetype) &&
+ !je16_to_cpu(node->magic) && !je32_to_cpu(node->totlen)) {
+ noisy_printk(&noise, "jffs2_scan_eraseblock(): All zero node header at 0x%08x.\n", ofs);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
+ ofs += 4;
+ continue;
+ }
if (ofs + je32_to_cpu(node->totlen) >
jeb->offset + c->sector_size) {
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index de718e3..4fac6dd 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -238,7 +238,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
spin_lock(&c->erase_completion_lock);
- jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+ if (c->wbuf_ofs % c->mtd->erasesize)
+ jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+ else
+ jffs2_block_refile(c, jeb, REFILE_ANYWAY);
spin_unlock(&c->erase_completion_lock);
BUG_ON(!ref_obsolete(jeb->last_node));
@@ -1087,7 +1090,7 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
if (!c->mtd->block_markbad)
return 1; // What else can we do?
- D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
+ printk(KERN_WARNING "JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
ret = c->mtd->block_markbad(c->mtd, bad_offset);
if (ret) {
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index af53c02..93d046c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -429,7 +429,8 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
int err;
/* Flush out writes to the server in order to update c/mtime */
- nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
+ if (S_ISREG(inode->i_mode))
+ nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
/*
* We may force a getattr if the user cares about atime.
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index bb516a2..f1eae44 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -151,10 +151,10 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_0;
-#ifdef CONFIG_NFS_V4
ret = nfs_register_sysctl();
if (ret < 0)
goto error_1;
+#ifdef CONFIG_NFS_V4
ret = register_filesystem(&nfs4_fs_type);
if (ret < 0)
goto error_2;
@@ -165,9 +165,9 @@ int __init register_nfs_fs(void)
#ifdef CONFIG_NFS_V4
error_2:
nfs_unregister_sysctl();
+#endif
error_1:
unregister_filesystem(&nfs_fs_type);
-#endif
error_0:
return ret;
}
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index fcdcafb..b62481da 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -50,6 +50,14 @@ static ctl_table nfs_cb_sysctls[] = {
.proc_handler = &proc_dointvec_jiffies,
.strategy = &sysctl_jiffies,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nfs_congestion_kb",
+ .data = &nfs_congestion_kb,
+ .maxlen = sizeof(nfs_congestion_kb),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
{ .ctl_name = 0 }
};
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index febdade..2867e6b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -12,6 +12,7 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
+#include <linux/swap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
@@ -38,7 +39,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
struct page *,
unsigned int, unsigned int);
static void nfs_mark_request_dirty(struct nfs_page *req);
-static int nfs_wait_on_write_congestion(struct address_space *, int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
@@ -48,8 +48,6 @@ static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
-static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
-
struct nfs_write_data *nfs_commit_alloc(void)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
@@ -211,6 +209,40 @@ static int wb_priority(struct writeback_control *wbc)
}
/*
+ * NFS congestion control
+ */
+
+int nfs_congestion_kb;
+
+#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
+#define NFS_CONGESTION_OFF_THRESH \
+ (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
+
+static void nfs_set_page_writeback(struct page *page)
+{
+ if (!test_set_page_writeback(page)) {
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ if (atomic_inc_return(&nfss->writeback) >
+ NFS_CONGESTION_ON_THRESH)
+ set_bdi_congested(&nfss->backing_dev_info, WRITE);
+ }
+}
+
+static void nfs_end_page_writeback(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ end_page_writeback(page);
+ if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
+ clear_bdi_congested(&nfss->backing_dev_info, WRITE);
+ congestion_end(WRITE);
+ }
+}
+
+/*
* Find an associated nfs write request, and prepare to flush it out
* Returns 1 if there was no write request, or if the request was
* already tagged by nfs_set_page_dirty. Returns 0 if the request
@@ -247,7 +279,7 @@ static int nfs_page_mark_flush(struct page *page)
spin_unlock(req_lock);
if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
nfs_mark_request_dirty(req);
- set_page_writeback(page);
+ nfs_set_page_writeback(page);
}
ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
nfs_unlock_request(req);
@@ -302,13 +334,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
return err;
}
-/*
- * Note: causes nfs_update_request() to block on the assumption
- * that the writeback is generated due to memory pressure.
- */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
struct inode *inode = mapping->host;
int err;
@@ -317,20 +344,12 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
err = generic_writepages(mapping, wbc);
if (err)
return err;
- while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
- if (wbc->nonblocking)
- return 0;
- nfs_wait_on_write_congestion(mapping, 0);
- }
err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
if (err < 0)
goto out;
nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
err = 0;
out:
- clear_bit(BDI_write_congested, &bdi->state);
- wake_up_all(&nfs_write_congestion);
- congestion_end(WRITE);
return err;
}
@@ -360,7 +379,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
}
/*
- * Insert a write request into an inode
+ * Remove a write request from an inode
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
@@ -531,10 +550,10 @@ static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, un
}
#endif
-static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
+static int nfs_wait_on_write_congestion(struct address_space *mapping)
{
+ struct inode *inode = mapping->host;
struct backing_dev_info *bdi = mapping->backing_dev_info;
- DEFINE_WAIT(wait);
int ret = 0;
might_sleep();
@@ -542,31 +561,23 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
if (!bdi_write_congested(bdi))
return 0;
- nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
+ nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT);
- if (intr) {
- struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
+ do {
+ struct rpc_clnt *clnt = NFS_CLIENT(inode);
sigset_t oldset;
rpc_clnt_sigmask(clnt, &oldset);
- prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
- if (bdi_write_congested(bdi)) {
- if (signalled())
- ret = -ERESTARTSYS;
- else
- schedule();
- }
+ ret = congestion_wait_interruptible(WRITE, HZ/10);
rpc_clnt_sigunmask(clnt, &oldset);
- } else {
- prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
- if (bdi_write_congested(bdi))
- schedule();
- }
- finish_wait(&nfs_write_congestion, &wait);
+ if (ret == -ERESTARTSYS)
+ break;
+ ret = 0;
+ } while (bdi_write_congested(bdi));
+
return ret;
}
-
/*
* Try to update any existing write request, or create one if there is none.
* In order to match, the request's credentials must match those of
@@ -577,14 +588,15 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
struct page *page, unsigned int offset, unsigned int bytes)
{
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req, *new = NULL;
unsigned long rqend, end;
end = offset + bytes;
- if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
+ if (nfs_wait_on_write_congestion(mapping))
return ERR_PTR(-ERESTARTSYS);
for (;;) {
/* Loop over all inode entries and see if we find
@@ -727,7 +739,7 @@ int nfs_updatepage(struct file *file, struct page *page,
static void nfs_writepage_release(struct nfs_page *req)
{
- end_page_writeback(req->wb_page);
+ nfs_end_page_writeback(req->wb_page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (!PageError(req->wb_page)) {
@@ -1042,12 +1054,12 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
if (task->tk_status < 0) {
nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
- end_page_writeback(page);
+ nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
dprintk(", error = %d\n", task->tk_status);
goto next;
}
- end_page_writeback(page);
+ nfs_end_page_writeback(page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
@@ -1514,6 +1526,26 @@ int __init nfs_init_writepagecache(void)
if (nfs_commit_mempool == NULL)
return -ENOMEM;
+ /*
+ * NFS congestion size, scale with available memory.
+ *
+ * 64MB: 8192k
+ * 128MB: 11585k
+ * 256MB: 16384k
+ * 512MB: 23170k
+ * 1GB: 32768k
+ * 2GB: 46340k
+ * 4GB: 65536k
+ * 8GB: 92681k
+ * 16GB: 131072k
+ *
+ * This allows larger machines to have larger/more transfers.
+ * Limit the default to 256M
+ */
+ nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+ if (nfs_congestion_kb > 256*1024)
+ nfs_congestion_kb = 256*1024;
+
return 0;
}
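The table in the comment above follows directly from nfs_congestion_kb = (16 * int_sqrt(totalram_pages)) << (PAGE_SHIFT - 10), capped at 256MB. A hedged userspace sketch that approximately reproduces those numbers for 4K pages (the local int_sqrt() stands in for the kernel helper, and integer truncation makes a few entries differ from the table by a kilobyte or two):

#include <stdio.h>

/* Integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	const unsigned int page_shift = 12;		/* 4K pages */
	unsigned long mem_mb[] = { 64, 128, 256, 512, 1024,
				   2048, 4096, 8192, 16384 };

	for (int i = 0; i < 9; i++) {
		unsigned long pages = mem_mb[i] << (20 - page_shift);
		unsigned long kb = (16 * int_sqrt(pages)) << (page_shift - 10);

		if (kb > 256 * 1024)			/* cap at 256MB */
			kb = 256 * 1024;
		printf("%6luMB RAM -> nfs_congestion_kb = %luk\n",
		       mem_mb[i], kb);
	}
	return 0;
}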
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c2660cb..8d995bc 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -17,7 +17,6 @@
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/mount.h>
-#include <asm/pgtable.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 93628b0..875c114 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -614,6 +614,27 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
ocfs2_rw_unlock(inode, 0);
}
+/*
+ * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
+ * from ext3. PageChecked() bits have been removed as OCFS2 does not
+ * do journalled data.
+ */
+static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
+{
+ journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
+
+ journal_invalidatepage(journal, page, offset);
+}
+
+static int ocfs2_releasepage(struct page *page, gfp_t wait)
+{
+ journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
+
+ if (!page_has_buffers(page))
+ return 0;
+ return journal_try_to_free_buffers(journal, page, wait);
+}
+
static ssize_t ocfs2_direct_IO(int rw,
struct kiocb *iocb,
const struct iovec *iov,
@@ -661,5 +682,8 @@ const struct address_space_operations ocfs2_aops = {
.commit_write = ocfs2_commit_write,
.bmap = ocfs2_bmap,
.sync_page = block_sync_page,
- .direct_IO = ocfs2_direct_IO
+ .direct_IO = ocfs2_direct_IO,
+ .invalidatepage = ocfs2_invalidatepage,
+ .releasepage = ocfs2_releasepage,
+ .migratepage = buffer_migrate_page,
};
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 5a9779b..eba282d 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1234,6 +1234,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
const char *page,
size_t count)
{
+ struct task_struct *hb_task;
long fd;
int sectsize;
char *p = (char *)page;
@@ -1319,20 +1320,28 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
*/
atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);
- reg->hr_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
- reg->hr_item.ci_name);
- if (IS_ERR(reg->hr_task)) {
- ret = PTR_ERR(reg->hr_task);
+ hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
+ reg->hr_item.ci_name);
+ if (IS_ERR(hb_task)) {
+ ret = PTR_ERR(hb_task);
mlog_errno(ret);
- reg->hr_task = NULL;
goto out;
}
+ spin_lock(&o2hb_live_lock);
+ reg->hr_task = hb_task;
+ spin_unlock(&o2hb_live_lock);
+
ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(&reg->hr_steady_iterations) == 0);
if (ret) {
- kthread_stop(reg->hr_task);
+ spin_lock(&o2hb_live_lock);
+ hb_task = reg->hr_task;
reg->hr_task = NULL;
+ spin_unlock(&o2hb_live_lock);
+
+ if (hb_task)
+ kthread_stop(hb_task);
goto out;
}
@@ -1354,10 +1363,17 @@ out:
static ssize_t o2hb_region_pid_read(struct o2hb_region *reg,
char *page)
{
- if (!reg->hr_task)
+ pid_t pid = 0;
+
+ spin_lock(&o2hb_live_lock);
+ if (reg->hr_task)
+ pid = reg->hr_task->pid;
+ spin_unlock(&o2hb_live_lock);
+
+ if (!pid)
return 0;
- return sprintf(page, "%u\n", reg->hr_task->pid);
+ return sprintf(page, "%u\n", pid);
}
struct o2hb_region_attribute {
@@ -1495,13 +1511,17 @@ out:
static void o2hb_heartbeat_group_drop_item(struct config_group *group,
struct config_item *item)
{
+ struct task_struct *hb_task;
struct o2hb_region *reg = to_o2hb_region(item);
/* stop the thread when the user removes the region dir */
- if (reg->hr_task) {
- kthread_stop(reg->hr_task);
- reg->hr_task = NULL;
- }
+ spin_lock(&o2hb_live_lock);
+ hb_task = reg->hr_task;
+ reg->hr_task = NULL;
+ spin_unlock(&o2hb_live_lock);
+
+ if (hb_task)
+ kthread_stop(hb_task);
config_item_put(item);
}
@@ -1682,7 +1702,7 @@ out:
}
EXPORT_SYMBOL_GPL(o2hb_register_callback);
-int o2hb_unregister_callback(struct o2hb_callback_func *hc)
+void o2hb_unregister_callback(struct o2hb_callback_func *hc)
{
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
@@ -1690,15 +1710,13 @@ int o2hb_unregister_callback(struct o2hb_callback_func *hc)
__builtin_return_address(0), hc);
if (list_empty(&hc->hc_item))
- return 0;
+ return;
down_write(&o2hb_callback_sem);
list_del_init(&hc->hc_item);
up_write(&o2hb_callback_sem);
-
- return 0;
}
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
index cac6223..cc6d40b 100644
--- a/fs/ocfs2/cluster/heartbeat.h
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -70,7 +70,7 @@ void o2hb_setup_callback(struct o2hb_callback_func *hc,
void *data,
int priority);
int o2hb_register_callback(struct o2hb_callback_func *hc);
-int o2hb_unregister_callback(struct o2hb_callback_func *hc);
+void o2hb_unregister_callback(struct o2hb_callback_func *hc);
void o2hb_fill_node_map(unsigned long *map,
unsigned bytes);
void o2hb_init(void);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1718215..69caf3e 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1638,17 +1638,8 @@ static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
void o2net_unregister_hb_callbacks(void)
{
- int ret;
-
- ret = o2hb_unregister_callback(&o2net_hb_up);
- if (ret < 0)
- mlog(ML_ERROR, "Status return %d unregistering heartbeat up "
- "callback!\n", ret);
-
- ret = o2hb_unregister_callback(&o2net_hb_down);
- if (ret < 0)
- mlog(ML_ERROR, "Status return %d unregistering heartbeat down "
- "callback!\n", ret);
+ o2hb_unregister_callback(&o2net_hb_up);
+ o2hb_unregister_callback(&o2net_hb_down);
}
int o2net_register_hb_callbacks(void)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 77e4e61..9229e04 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2730,14 +2730,17 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
int ret;
int lock_dropped = 0;
+ spin_lock(&res->spinlock);
if (res->owner != dlm->node_num) {
if (!__dlm_lockres_unused(res)) {
mlog(ML_ERROR, "%s:%.*s: this node is not master, "
"trying to free this but locks remain\n",
dlm->name, res->lockname.len, res->lockname.name);
}
+ spin_unlock(&res->spinlock);
goto leave;
}
+ spin_unlock(&res->spinlock);
/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
spin_unlock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 8ffa091..6421a8f 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -265,8 +265,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
/* This may drop and reacquire the dlm spinlock if it
* has to do migration. */
mlog(0, "calling dlm_purge_lockres!\n");
+ dlm_lockres_get(lockres);
if (dlm_purge_lockres(dlm, lockres))
BUG();
+ dlm_lockres_put(lockres);
mlog(0, "DONE calling dlm_purge_lockres!\n");
/* Avoid adding any scheduling latencies */
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 8fc52d6..b25ef63 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -164,8 +164,10 @@ int ocfs2_register_hb_callbacks(struct ocfs2_super *osb)
}
status = o2hb_register_callback(&osb->osb_hb_up);
- if (status < 0)
+ if (status < 0) {
mlog_errno(status);
+ o2hb_unregister_callback(&osb->osb_hb_down);
+ }
bail:
return status;
@@ -173,18 +175,11 @@ bail:
void ocfs2_clear_hb_callbacks(struct ocfs2_super *osb)
{
- int status;
-
if (ocfs2_mount_local(osb))
return;
- status = o2hb_unregister_callback(&osb->osb_hb_down);
- if (status < 0)
- mlog_errno(status);
-
- status = o2hb_unregister_callback(&osb->osb_hb_up);
- if (status < 0)
- mlog_errno(status);
+ o2hb_unregister_callback(&osb->osb_hb_down);
+ o2hb_unregister_callback(&osb->osb_hb_up);
}
void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index 74552c6..6e8bb66 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -235,5 +235,4 @@ config EFI_PARTITION
select CRC32
help
Say Y here if you would like to use hard disks under Linux which
- were partitioned using EFI GPT. Presently only useful on the
- IA-64 platform.
+ were partitioned using EFI GPT.
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 22d38ff..8a7d003 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -180,7 +180,7 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
}
if (res > 0)
return state;
- if (!err)
+ if (err)
/* The partition is unrecognized. So report I/O errors if there were any */
res = err;
if (!res)
@@ -541,7 +541,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
return 0;
if (IS_ERR(state)) /* I/O error reading the partition table */
- return PTR_ERR(state);
+ return -EIO;
for (p = 1; p < state->limit; p++) {
sector_t size = state->parts[p].size;
sector_t from = state->parts[p].from;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 01f7769..989af5e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1558,29 +1558,20 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
struct inode * inode = file->f_path.dentry->d_inode;
- unsigned long page;
+ char *p = NULL;
ssize_t length;
struct task_struct *task = get_proc_task(inode);
- length = -ESRCH;
if (!task)
- goto out_no_task;
-
- if (count > PAGE_SIZE)
- count = PAGE_SIZE;
- length = -ENOMEM;
- if (!(page = __get_free_page(GFP_KERNEL)))
- goto out;
+ return -ESRCH;
length = security_getprocattr(task,
(char*)file->f_path.dentry->d_name.name,
- (void*)page, count);
- if (length >= 0)
- length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
- free_page(page);
-out:
+ &p);
put_task_struct(task);
-out_no_task:
+ if (length > 0)
+ length = simple_read_from_buffer(buf, count, ppos, p, length);
+ kfree(p);
return length;
}
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 42261db..723f7c6 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -181,6 +181,7 @@ static int smb_setup_request(struct smb_request *req)
req->rq_errno = 0;
req->rq_fragment = 0;
kfree(req->rq_trans2buffer);
+ req->rq_trans2buffer = NULL;
return 0;
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 8d4d839..fc46333 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -168,12 +168,12 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
ssize_t retval = 0;
down(&buffer->sem);
- if (buffer->orphaned) {
- retval = -ENODEV;
- goto out;
- }
if (buffer->needs_read_fill) {
- if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+ if (buffer->orphaned)
+ retval = -ENODEV;
+ else
+ retval = fill_read_buffer(file->f_path.dentry,buffer);
+ if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -629,6 +629,60 @@ void sysfs_remove_file_from_group(struct kobject *kobj,
}
EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
+struct sysfs_schedule_callback_struct {
+ struct kobject *kobj;
+ void (*func)(void *);
+ void *data;
+ struct work_struct work;
+};
+
+static void sysfs_schedule_callback_work(struct work_struct *work)
+{
+ struct sysfs_schedule_callback_struct *ss = container_of(work,
+ struct sysfs_schedule_callback_struct, work);
+
+ (ss->func)(ss->data);
+ kobject_put(ss->kobj);
+ kfree(ss);
+}
+
+/**
+ * sysfs_schedule_callback - helper to schedule a callback for a kobject
+ * @kobj: object we're acting for.
+ * @func: callback function to invoke later.
+ * @data: argument to pass to @func.
+ *
+ * sysfs attribute methods must not unregister themselves or their parent
+ * kobject (which would amount to the same thing). Attempts to do so will
+ * deadlock, since unregistration is mutually exclusive with driver
+ * callbacks.
+ *
+ * Instead methods can call this routine, which will attempt to allocate
+ * and schedule a workqueue request to call back @func with @data as its
+ * argument in the workqueue's process context. @kobj will be pinned
+ * until @func returns.
+ *
+ * Returns 0 if the request was submitted, -ENOMEM if storage could not
+ * be allocated.
+ */
+int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
+ void *data)
+{
+ struct sysfs_schedule_callback_struct *ss;
+
+ ss = kmalloc(sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+ kobject_get(kobj);
+ ss->kobj = kobj;
+ ss->func = func;
+ ss->data = data;
+ INIT_WORK(&ss->work, sysfs_schedule_callback_work);
+ schedule_work(&ss->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
+
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
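The intended caller of sysfs_schedule_callback() is an attribute method that wants to unregister its own device, which it cannot do directly without deadlocking. The sketch below is hypothetical (the "delete" attribute, delete_store() and delete_callback() are invented for illustration and are not part of this patch); it assumes the struct device_attribute prototypes of this kernel era:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

/* Runs later from the shared workqueue, outside any sysfs method, so it
 * may safely tear down the device that owns the attribute. */
static void delete_callback(void *data)
{
	struct device *dev = data;

	device_unregister(dev);
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int ret;

	/* Pin dev->kobj and defer the teardown instead of deadlocking here. */
	ret = sysfs_schedule_callback(&dev->kobj, delete_callback, dev);
	return ret ? ret : count;
}
static DEVICE_ATTR(delete, S_IWUSR, NULL, delete_store);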
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index ccb7d72..4de5c6b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -222,13 +222,17 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
static inline void orphan_all_buffers(struct inode *node)
{
- struct sysfs_buffer_collection *set = node->i_private;
+ struct sysfs_buffer_collection *set;
struct sysfs_buffer *buf;
mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD);
- if (node->i_private) {
- list_for_each_entry(buf, &set->associates, associates)
+ set = node->i_private;
+ if (set) {
+ list_for_each_entry(buf, &set->associates, associates) {
+ down(&buf->sem);
buf->orphaned = 1;
+ up(&buf->sem);
+ }
}
mutex_unlock(&node->i_mutex);
}
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index bcc4408..841ac25 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -244,62 +244,87 @@ failed:
* We can come here from ufs_writepage or ufs_prepare_write,
* locked_page is argument of these functions, so we already lock it.
*/
-static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
- unsigned int count, unsigned int oldb,
- unsigned int newb, struct page *locked_page)
+static void ufs_change_blocknr(struct inode *inode, sector_t beg,
+ unsigned int count, sector_t oldb,
+ sector_t newb, struct page *locked_page)
{
- const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
+ const unsigned blks_per_page =
+ 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ const unsigned mask = blks_per_page - 1;
struct address_space * const mapping = inode->i_mapping;
- pgoff_t index, cur_index;
- unsigned end, pos, j;
+ pgoff_t index, cur_index, last_index;
+ unsigned pos, j, lblock;
+ sector_t end, i;
struct page *page;
struct buffer_head *head, *bh;
- UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
- inode->i_ino, count, oldb, newb);
+ UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
+ inode->i_ino, count,
+ (unsigned long long)oldb, (unsigned long long)newb);
BUG_ON(!locked_page);
BUG_ON(!PageLocked(locked_page));
cur_index = locked_page->index;
-
- for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
- index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ end = count + beg;
+ last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ for (i = beg; i < end; i = (i | mask) + 1) {
+ index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
if (likely(cur_index != index)) {
page = ufs_get_locked_page(mapping, index);
- if (!page || IS_ERR(page)) /* it was truncated or EIO */
+ if (!page)/* it was truncated */
+ continue;
+ if (IS_ERR(page)) {/* or EIO */
+ ufs_error(inode->i_sb, __FUNCTION__,
+ "read of page %llu failed\n",
+ (unsigned long long)index);
continue;
+ }
} else
page = locked_page;
head = page_buffers(page);
bh = head;
- pos = beg & mask;
+ pos = i & mask;
for (j = 0; j < pos; ++j)
bh = bh->b_this_page;
- j = 0;
+
+
+ if (unlikely(index == last_index))
+ lblock = end & mask;
+ else
+ lblock = blks_per_page;
+
do {
- if (buffer_mapped(bh)) {
- pos = bh->b_blocknr - oldb;
- if (pos < count) {
- UFSD(" change from %llu to %llu\n",
- (unsigned long long)pos + oldb,
- (unsigned long long)pos + newb);
- bh->b_blocknr = newb + pos;
- unmap_underlying_metadata(bh->b_bdev,
- bh->b_blocknr);
- mark_buffer_dirty(bh);
- ++j;
+ if (j >= lblock)
+ break;
+ pos = (i - beg) + j;
+
+ if (!buffer_mapped(bh))
+ map_bh(bh, inode->i_sb, oldb + pos);
+ if (!buffer_uptodate(bh)) {
+ ll_rw_block(READ, 1, &bh);
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ ufs_error(inode->i_sb, __FUNCTION__,
+ "read of block failed\n");
+ break;
}
}
+ UFSD(" change from %llu to %llu, pos %u\n",
+ (unsigned long long)pos + oldb,
+ (unsigned long long)pos + newb, pos);
+
+ bh->b_blocknr = newb + pos;
+ unmap_underlying_metadata(bh->b_bdev,
+ bh->b_blocknr);
+ mark_buffer_dirty(bh);
+ ++j;
bh = bh->b_this_page;
} while (bh != head);
- if (j)
- set_page_dirty(page);
-
if (likely(cur_index != index))
ufs_put_locked_page(page);
}
@@ -457,8 +482,9 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
if (result) {
ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
locked_page != NULL);
- ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
- result, locked_page);
+ ufs_change_blocknr(inode, fragment - oldcount, oldcount,
+ uspi->s_sbbase + tmp,
+ uspi->s_sbbase + result, locked_page);
ufs_cpu_to_data_ptr(sb, p, result);
*err = 0;
UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index b868878..c28a8b6 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -343,9 +343,8 @@ cg_found:
lock_buffer(bh);
ufs2_inode = (struct ufs2_inode *)bh->b_data;
ufs2_inode += ufs_inotofsbo(inode->i_ino);
- ufs2_inode->ui_birthtime.tv_sec =
- cpu_to_fs32(sb, CURRENT_TIME_SEC.tv_sec);
- ufs2_inode->ui_birthtime.tv_usec = 0;
+ ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
+ ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sb->s_flags & MS_SYNCHRONOUS)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index fb34ad0..013d7af 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -212,7 +212,7 @@ repeat:
brelse (result);
goto repeat;
} else {
- *phys = tmp + blockoff;
+ *phys = uspi->s_sbbase + tmp + blockoff;
return NULL;
}
}
@@ -282,9 +282,9 @@ repeat:
}
if (!phys) {
- result = sb_getblk(sb, tmp + blockoff);
+ result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
} else {
- *phys = tmp + blockoff;
+ *phys = uspi->s_sbbase + tmp + blockoff;
result = NULL;
*err = 0;
*new = 1;
@@ -368,7 +368,7 @@ repeat:
brelse (result);
goto repeat;
} else {
- *phys = tmp + blockoff;
+ *phys = uspi->s_sbbase + tmp + blockoff;
goto out;
}
}
@@ -389,9 +389,9 @@ repeat:
if (!phys) {
- result = sb_getblk(sb, tmp + blockoff);
+ result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
} else {
- *phys = tmp + blockoff;
+ *phys = uspi->s_sbbase + tmp + blockoff;
*new = 1;
}
@@ -668,12 +668,12 @@ static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
- inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
- inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
- inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
+ inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
+ inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
+ inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
+ inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
+ inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
+ inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
@@ -803,12 +803,12 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
- ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
- ufs_inode->ui_atime.tv_usec = 0;
- ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
- ufs_inode->ui_ctime.tv_usec = 0;
- ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
- ufs_inode->ui_mtime.tv_usec = 0;
+ ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
+ ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
+ ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
+ ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
+ ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
+ ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 749581f..79c54c85 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -74,7 +74,7 @@ static int ufs_trunc_direct(struct inode *inode)
unsigned i, tmp;
int retry;
- UFSD("ENTER\n");
+ UFSD("ENTER: ino %lu\n", inode->i_ino);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
@@ -96,8 +96,8 @@ static int ufs_trunc_direct(struct inode *inode)
block2 = ufs_fragstoblks (frag3);
}
- UFSD("frag1 %llu, frag2 %llu, block1 %llu, block2 %llu, frag3 %llu,"
- " frag4 %llu\n",
+ UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
+ " frag3 %llu, frag4 %llu\n", inode->i_ino,
(unsigned long long)frag1, (unsigned long long)frag2,
(unsigned long long)block1, (unsigned long long)block2,
(unsigned long long)frag3, (unsigned long long)frag4);
@@ -163,7 +163,7 @@ next1:
mark_inode_dirty(inode);
next3:
- UFSD("EXIT\n");
+ UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
@@ -248,7 +248,7 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
}
ubh_brelse (ind_ubh);
- UFSD("EXIT\n");
+ UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
@@ -262,7 +262,7 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
void *dind;
int retry = 0;
- UFSD("ENTER\n");
+ UFSD("ENTER: ino %lu\n", inode->i_ino);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
@@ -312,7 +312,7 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
}
ubh_brelse (dind_bh);
- UFSD("EXIT\n");
+ UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
@@ -327,7 +327,7 @@ static int ufs_trunc_tindirect(struct inode *inode)
void *tind, *p;
int retry;
- UFSD("ENTER\n");
+ UFSD("ENTER: ino %lu\n", inode->i_ino);
retry = 0;
@@ -348,7 +348,7 @@ static int ufs_trunc_tindirect(struct inode *inode)
}
for (i = tindirect_block ; i < uspi->s_apb ; i++) {
- tind = ubh_get_addr32 (tind_bh, i);
+ tind = ubh_get_data_ptr(uspi, tind_bh, i);
retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
ubh_mark_buffer_dirty(tind_bh);
@@ -372,19 +372,21 @@ static int ufs_trunc_tindirect(struct inode *inode)
}
ubh_brelse (tind_bh);
- UFSD("EXIT\n");
+ UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
static int ufs_alloc_lastblock(struct inode *inode)
{
int err = 0;
+ struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
- struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
unsigned i, end;
sector_t lastfrag;
struct page *lastpage;
struct buffer_head *bh;
+ u64 phys64;
lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -424,6 +426,20 @@ static int ufs_alloc_lastblock(struct inode *inode)
set_page_dirty(lastpage);
}
+ if (lastfrag >= UFS_IND_FRAGMENT) {
+ end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
+ phys64 = bh->b_blocknr + 1;
+ for (i = 0; i < end; ++i) {
+ bh = sb_getblk(sb, i + phys64);
+ lock_buffer(bh);
+ memset(bh->b_data, 0, sb->s_blocksize);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ sync_dirty_buffer(bh);
+ brelse(bh);
+ }
+ }
out_unlock:
ufs_put_locked_page(lastpage);
out:
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e2bea6a..69e9e80 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1829,11 +1829,11 @@ xfs_buf_init(void)
if (!xfs_buf_zone)
goto out_free_trace_buf;
- xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
+ xfslogd_workqueue = create_workqueue("xfslogd");
if (!xfslogd_workqueue)
goto out_free_buf_zone;
- xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
+ xfsdatad_workqueue = create_workqueue("xfsdatad");
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;