author | Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> | 2008-12-05 15:27:43 +1100
---|---|---
committer | Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> | 2008-12-05 15:27:43 +1100
commit | 14d676f56fad26fd3c31eeff5d4ef8ea4a163571 | (patch)
tree | 5e740c5931daecf44a6e74c230f2deb291290f4b | /fs
parent | 797eaed40e1df4a3b9ece6894a71ce2b568bca38 | (diff)
parent | feaf3848a813a106f163013af6fcf6c4bfec92d9 | (diff)
download | op-kernel-dev-14d676f56fad26fd3c31eeff5d4ef8ea4a163571.zip | op-kernel-dev-14d676f56fad26fd3c31eeff5d4ef8ea4a163571.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs')
-rw-r--r-- | fs/buffer.c | 1
-rw-r--r-- | fs/cifs/file.c | 77
-rw-r--r-- | fs/eventpoll.c | 85
-rw-r--r-- | fs/lockd/host.c | 3
-rw-r--r-- | fs/lockd/svc.c | 1
-rw-r--r-- | fs/nfsd/nfs4recover.c | 2
-rw-r--r-- | fs/nfsd/nfs4state.c | 1
-rw-r--r-- | fs/ntfs/debug.h | 8
-rw-r--r-- | fs/ocfs2/buffer_head_io.c | 15
-rw-r--r-- | fs/ocfs2/dlm/dlmfs.c | 4
-rw-r--r-- | fs/ocfs2/dlm/userdlm.h | 2
-rw-r--r-- | fs/ocfs2/dlmglue.c | 3
-rw-r--r-- | fs/ocfs2/ocfs2.h | 2
-rw-r--r-- | fs/ocfs2/stack_user.c | 3
-rw-r--r-- | fs/ubifs/commit.c | 4
-rw-r--r-- | fs/ubifs/debug.c | 66
-rw-r--r-- | fs/ubifs/dir.c | 5
-rw-r--r-- | fs/ubifs/file.c | 91
-rw-r--r-- | fs/ubifs/journal.c | 8
-rw-r--r-- | fs/ubifs/key.h | 4
-rw-r--r-- | fs/ubifs/lpt_commit.c | 2
-rw-r--r-- | fs/ubifs/orphan.c | 28
-rw-r--r-- | fs/ubifs/recovery.c | 17
-rw-r--r-- | fs/ubifs/replay.c | 2
-rw-r--r-- | fs/ubifs/sb.c | 9
-rw-r--r-- | fs/ubifs/super.c | 70
-rw-r--r-- | fs/ubifs/tnc.c | 12
-rw-r--r-- | fs/ubifs/ubifs.h | 12
-rw-r--r-- | fs/udf/inode.c | 1
29 files changed, 376 insertions(+), 162 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6569fda..10179cf 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -878,6 +878,7 @@ void invalidate_inode_buffers(struct inode *inode)
 		spin_unlock(&buffer_mapping->private_lock);
 	}
 }
+EXPORT_SYMBOL(invalidate_inode_buffers);
 
 /*
  * Remove any clean buffers from the inode's buffer list. This is called
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index b691b89..f0a81e6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1475,7 +1475,11 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 	cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
 		 page, pos, copied));
 
-	if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+	if (PageChecked(page)) {
+		if (copied == len)
+			SetPageUptodate(page);
+		ClearPageChecked(page);
+	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
 		SetPageUptodate(page);
 
 	if (!PageUptodate(page)) {
@@ -2062,39 +2066,70 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
 {
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+	loff_t page_start = pos & PAGE_MASK;
+	loff_t i_size;
+	struct page *page;
+	int rc = 0;
 
 	cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
 
-	*pagep = __grab_cache_page(mapping, index);
-	if (!*pagep)
-		return -ENOMEM;
-
-	if (PageUptodate(*pagep))
-		return 0;
+	page = __grab_cache_page(mapping, index);
+	if (!page) {
+		rc = -ENOMEM;
+		goto out;
+	}
 
-	/* If we are writing a full page it will be up to date,
-	   no need to read from the server */
-	if (len == PAGE_CACHE_SIZE && flags & AOP_FLAG_UNINTERRUPTIBLE)
-		return 0;
+	if (PageUptodate(page))
+		goto out;
 
-	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
-		int rc;
+	/*
+	 * If we write a full page it will be up to date, no need to read from
+	 * the server. If the write is short, we'll end up doing a sync write
+	 * instead.
+	 */
+	if (len == PAGE_CACHE_SIZE)
+		goto out;
 
-		/* might as well read a page, it is fast enough */
-		rc = cifs_readpage_worker(file, *pagep, &offset);
+	/*
+	 * optimize away the read when we have an oplock, and we're not
+	 * expecting to use any of the data we'd be reading in. That
+	 * is, when the page lies beyond the EOF, or straddles the EOF
+	 * and the write will cover all of the existing data.
+	 */
+	if (CIFS_I(mapping->host)->clientCanCacheRead) {
+		i_size = i_size_read(mapping->host);
+		if (page_start >= i_size ||
+		    (offset == 0 && (pos + len) >= i_size)) {
+			zero_user_segments(page, 0, offset,
+					   offset + len,
+					   PAGE_CACHE_SIZE);
+			/*
+			 * PageChecked means that the parts of the page
+			 * to which we're not writing are considered up
+			 * to date. Once the data is copied to the
+			 * page, it can be set uptodate.
+			 */
+			SetPageChecked(page);
+			goto out;
+		}
+	}
 
-		/* we do not need to pass errors back
-		   e.g. if we do not have read access to the file
-		   because cifs_write_end will attempt synchronous writes
-		   -- shaggy */
+	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+		/*
+		 * might as well read a page, it is fast enough. If we get
+		 * an error, we don't need to return it. cifs_write_end will
+		 * do a sync write instead since PG_uptodate isn't set.
+		 */
+		cifs_readpage_worker(file, page, &page_start);
 	} else {
 		/* we could try using another file handle if there is one -
 		   but how would we lock it to prevent close of that handle
 		   racing with this read? In any case
 		   this will be written out by write_end so is fine */
 	}
-
-	return 0;
+out:
+	*pagep = page;
+	return rc;
 }
 
 const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index aec5c13f..96355d5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -102,6 +102,8 @@
 
 #define EP_UNACTIVE_PTR ((void *) -1L)
 
+#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
+
 struct epoll_filefd {
 	struct file *file;
 	int fd;
@@ -200,6 +202,9 @@ struct eventpoll {
 	 * holding ->lock.
 	 */
 	struct epitem *ovflist;
+
+	/* The user that created the eventpoll descriptor */
+	struct user_struct *user;
 };
 
 /* Wait structure used by the poll hooks */
@@ -227,9 +232,17 @@ struct ep_pqueue {
 };
 
 /*
+ * Configuration options available inside /proc/sys/fs/epoll/
+ */
+/* Maximum number of epoll devices, per user */
+static int max_user_instances __read_mostly;
+/* Maximum number of epoll watched descriptors, per user */
+static int max_user_watches __read_mostly;
+
+/*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
-static struct mutex epmutex;
+static DEFINE_MUTEX(epmutex);
 
 /* Safe wake up implementation */
 static struct poll_safewake psw;
@@ -240,6 +253,33 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int zero;
+
+ctl_table epoll_table[] = {
+	{
+		.procname = "max_user_instances",
+		.data = &max_user_instances,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+		.extra1 = &zero,
+	},
+	{
+		.procname = "max_user_watches",
+		.data = &max_user_watches,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+		.extra1 = &zero,
+	},
+	{ .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
 
 /* Setup the structure that is used as key for the RB tree */
 static inline void ep_set_ffd(struct epoll_filefd *ffd,
@@ -402,6 +442,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
+	atomic_dec(&ep->user->epoll_watches);
+
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
 		     current, ep, file));
 
@@ -449,6 +491,8 @@ static void ep_free(struct eventpoll *ep)
 
 	mutex_unlock(&epmutex);
 	mutex_destroy(&ep->mtx);
+	atomic_dec(&ep->user->epoll_devs);
+	free_uid(ep->user);
 	kfree(ep);
 }
 
@@ -532,10 +576,19 @@ void eventpoll_release_file(struct file *file)
 
 static int ep_alloc(struct eventpoll **pep)
 {
-	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	int error;
+	struct user_struct *user;
+	struct eventpoll *ep;
 
-	if (!ep)
-		return -ENOMEM;
+	user = get_current_user();
+	error = -EMFILE;
+	if (unlikely(atomic_read(&user->epoll_devs) >=
+			max_user_instances))
+		goto free_uid;
+	error = -ENOMEM;
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (unlikely(!ep))
+		goto free_uid;
 
 	spin_lock_init(&ep->lock);
 	mutex_init(&ep->mtx);
@@ -544,12 +597,17 @@ static int ep_alloc(struct eventpoll **pep)
 	INIT_LIST_HEAD(&ep->rdllist);
 	ep->rbr = RB_ROOT;
 	ep->ovflist = EP_UNACTIVE_PTR;
+	ep->user = user;
 
 	*pep = ep;
 
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
 		     current, ep));
 	return 0;
+
+free_uid:
+	free_uid(user);
+	return error;
 }
 
 /*
@@ -703,9 +761,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	error = -ENOMEM;
+	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
+		     max_user_watches))
+		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
-		goto error_return;
+		return -ENOMEM;
 
 	/* Item initialization follow here ... */
 	INIT_LIST_HEAD(&epi->rdllink);
@@ -735,6 +795,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	 * install process. Namely an allocation for a wait queue failed due
 	 * high memory pressure.
 	 */
+	error = -ENOMEM;
 	if (epi->nwait < 0)
 		goto error_unregister;
 
@@ -765,6 +826,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
+	atomic_inc(&ep->user->epoll_watches);
+
 	/* We have to call this outside the lock */
 	if (pwake)
 		ep_poll_safewake(&psw, &ep->poll_wait);
@@ -789,7 +852,7 @@ error_unregister:
 	spin_unlock_irqrestore(&ep->lock, flags);
 
 	kmem_cache_free(epi_cache, epi);
-error_return:
+
 	return error;
 }
 
@@ -1078,6 +1141,7 @@ asmlinkage long sys_epoll_create1(int flags)
 			      flags & O_CLOEXEC);
 	if (fd < 0)
 		ep_free(ep);
+	atomic_inc(&ep->user->epoll_devs);
 
 error_return:
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
@@ -1299,7 +1363,12 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 
 static int __init eventpoll_init(void)
 {
-	mutex_init(&epmutex);
+	struct sysinfo si;
+
+	si_meminfo(&si);
+	max_user_instances = 128;
+	max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+		EP_ITEM_COST;
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_poll_safewake_init(&psw);
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 9fd8889..70fc63a 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -167,7 +167,8 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
 			continue;
 		if (host->h_server != ni->server)
 			continue;
-		if (!nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
+		if (ni->server &&
+		    !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
 			continue;
 
 		/* Move to head of hash chain. */
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index c631a83..56b07673 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -181,6 +181,7 @@ lockd(void *vrqstp)
 	}
 	flush_signals(current);
 	cancel_delayed_work_sync(&grace_period_end);
+	locks_end_grace(&lockd_manager);
 	if (nlmsvc_ops)
 		nlmsvc_invalidate_all();
 	nlm_shutdown_hosts();
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index bb93946..b79ec93 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -225,12 +225,12 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
 		return 0;
 
 	nfs4_save_user(&uid, &gid);
+	INIT_LIST_HEAD(dentries);
 
 	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
 	status = PTR_ERR(filp);
 	if (IS_ERR(filp))
 		goto out;
 
-	INIT_LIST_HEAD(dentries);
 	status = vfs_readdir(filp, nfsd4_build_dentrylist, &dla);
 	fput(filp);
 	while (!list_empty(dentries)) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b0bebc5..1a052ac2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3261,6 +3261,7 @@ nfs4_state_shutdown(void)
 {
 	cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
 	destroy_workqueue(laundry_wq);
+	locks_end_grace(&nfsd4_manager);
 	nfs4_lock_state();
 	nfs4_release_reclaim();
 	__nfs4_state_shutdown();
diff --git a/fs/ntfs/debug.h b/fs/ntfs/debug.h
index 5e6724c..2142b1c 100644
--- a/fs/ntfs/debug.h
+++ b/fs/ntfs/debug.h
@@ -30,7 +30,8 @@
 
 extern int debug_msgs;
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+extern void __ntfs_debug(const char *file, int line, const char *function,
+	const char *format, ...) __attribute__ ((format (printf, 4, 5)));
 /**
  * ntfs_debug - write a debug level message to syslog
  * @f: a printf format string containing the message
@@ -39,11 +40,6 @@ extern int debug_msgs;
 * ntfs_debug() writes a DEBUG level message to the syslog but only if the
 * driver was compiled with -DDEBUG. Otherwise, the call turns into a NOP.
 */
-static void ntfs_debug(const char *f, ...);
-#endif
-
-extern void __ntfs_debug (const char *file, int line, const char *function,
-	const char *format, ...) __attribute__ ((format (printf, 4, 5)));
 #define ntfs_debug(f, a...) \
 	__ntfs_debug(__FILE__, __LINE__, __func__, f, ##a)
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 7e947c6..3a178ec 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -112,7 +112,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 		bh = bhs[i];
 
 		if (buffer_jbd(bh)) {
-			mlog(ML_ERROR,
+			mlog(ML_BH_IO,
 			     "trying to sync read a jbd "
 			     "managed bh (blocknr = %llu), skipping\n",
 			     (unsigned long long)bh->b_blocknr);
@@ -147,15 +147,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 	for (i = nr; i > 0; i--) {
 		bh = bhs[i - 1];
 
-		if (buffer_jbd(bh)) {
-			mlog(ML_ERROR,
-			     "the journal got the buffer while it was "
-			     "locked for io! (blocknr = %llu)\n",
-			     (unsigned long long)bh->b_blocknr);
-			BUG();
-		}
+		/* No need to wait on the buffer if it's managed by JBD. */
+		if (!buffer_jbd(bh))
+			wait_on_buffer(bh);
 
-		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			/* Status won't be cleared from here on out,
 			 * so we can safely record this and loop back
@@ -251,8 +246,6 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
 			ignore_cache = 1;
 		}
 
-		/* XXX: Can we ever get this and *not* have the cached
-		 * flag set? */
 		if (buffer_jbd(bh)) {
 			if (ignore_cache)
 				mlog(ML_BH_IO, "trying to sync read a jbd "
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 533a789..ba962d7 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -608,8 +608,10 @@ static int __init init_dlmfs_fs(void)
 				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
 					SLAB_MEM_SPREAD),
 				dlmfs_init_once);
-	if (!dlmfs_inode_cache)
+	if (!dlmfs_inode_cache) {
+		status = -ENOMEM;
 		goto bail;
+	}
 	cleanup_inode = 1;
 
 	user_dlm_worker = create_singlethread_workqueue("user_dlm");
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
index 39ec277..0c3cc03 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -33,7 +33,7 @@
 #include <linux/workqueue.h>
 
 /* user_lock_res->l_flags flags. */
-#define USER_LOCK_ATTACHED      (0x00000001) /* have we initialized
+#define USER_LOCK_ATTACHED      (0x00000001) /* we have initialized
 					       * the lvb */
 #define USER_LOCK_BUSY          (0x00000002) /* we are currently in
 					       * dlm_lock */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index ec68442..6e6cc0a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2841,9 +2841,8 @@ static void ocfs2_unlock_ast(void *opaque, int error)
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
-	spin_unlock_irqrestore(&lockres->l_lock, flags);
-
 	wake_up(&lockres->l_event);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index fef7ece..3fed9e3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -85,7 +85,7 @@ enum ocfs2_unlock_action {
 };
 
 /* ocfs2_lock_res->l_flags flags. */
-#define OCFS2_LOCK_ATTACHED      (0x00000001) /* have we initialized
+#define OCFS2_LOCK_ATTACHED      (0x00000001) /* we have initialized
 					       * the lvb */
 #define OCFS2_LOCK_BUSY          (0x00000002) /* we are currently in
 					       * dlm_lock */
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index faec2d8..9b76d41 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -740,6 +740,9 @@ static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
 
 static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
 {
+	if (!lksb->lksb_fsdlm.sb_lvbptr)
+		lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
+					     sizeof(struct dlm_lksb);
 	return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
 }
 
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 0a6aa2c..b49884c 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -234,8 +234,8 @@ int ubifs_bg_thread(void *info)
 	int err;
 	struct ubifs_info *c = info;
 
-	ubifs_msg("background thread \"%s\" started, PID %d",
-		  c->bgt_name, current->pid);
+	dbg_msg("background thread \"%s\" started, PID %d",
+		c->bgt_name, current->pid);
 	set_freezable();
 
 	while (1) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7186400..510ffa0 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -101,21 +101,24 @@ static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
 	if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
 		switch (type) {
 		case UBIFS_INO_KEY:
-			sprintf(p, "(%lu, %s)", key_inum(c, key),
+			sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
 				get_key_type(type));
 			break;
 		case UBIFS_DENT_KEY:
 		case UBIFS_XENT_KEY:
-			sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %#08x)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_hash(c, key));
 			break;
 		case UBIFS_DATA_KEY:
-			sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %u)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_block(c, key));
 			break;
 		case UBIFS_TRUN_KEY:
 			sprintf(p, "(%lu, %s)",
-				key_inum(c, key), get_key_type(type));
+				(unsigned long)key_inum(c, key),
+				get_key_type(type));
 			break;
 		default:
 			sprintf(p, "(bad key type: %#08x, %#08x)",
@@ -364,8 +367,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
 			le32_to_cpu(mst->ihead_lnum));
 		printk(KERN_DEBUG "\tihead_offs %u\n",
 			le32_to_cpu(mst->ihead_offs));
-		printk(KERN_DEBUG "\tindex_size %u\n",
-			le32_to_cpu(mst->index_size));
+		printk(KERN_DEBUG "\tindex_size %llu\n",
+			(unsigned long long)le64_to_cpu(mst->index_size));
 		printk(KERN_DEBUG "\tlpt_lnum %u\n",
 			le32_to_cpu(mst->lpt_lnum));
 		printk(KERN_DEBUG "\tlpt_offs %u\n",
@@ -1589,7 +1592,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
 
 	if (inum > c->highest_inum) {
 		ubifs_err("too high inode number, max. is %lu",
-			  c->highest_inum);
+			  (unsigned long)c->highest_inum);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1668,16 +1671,18 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	ino_key_init(c, &key, inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", inum);
+		ubifs_err("inode %lu not found in index", (unsigned long)inum);
 		return ERR_PTR(-ENOENT);
 	} else if (err < 0) {
-		ubifs_err("error %d while looking up inode %lu", err, inum);
+		ubifs_err("error %d while looking up inode %lu",
+			  err, (unsigned long)inum);
 		return ERR_PTR(err);
 	}
 
 	zbr = &znode->zbranch[n];
 	if (zbr->len < UBIFS_INO_NODE_SZ) {
-		ubifs_err("bad node %lu node length %d", inum, zbr->len);
+		ubifs_err("bad node %lu node length %d",
+			  (unsigned long)inum, zbr->len);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1697,7 +1702,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	kfree(ino);
 	if (IS_ERR(fscki)) {
 		ubifs_err("error %ld while adding inode %lu node",
-			  PTR_ERR(fscki), inum);
+			  PTR_ERR(fscki), (unsigned long)inum);
 		return fscki;
 	}
 
@@ -1786,7 +1791,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing data node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1819,7 +1825,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1832,7 +1839,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
 				  "trying to find parent inode node %lu",
-				  err, inum);
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1923,7 +1930,8 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 1) {
 				ubifs_err("directory inode %lu has %d "
 					  "direntries which refer it, but "
-					  "should be 1", fscki->inum,
+					  "should be 1",
+					  (unsigned long)fscki->inum,
 					  fscki->references);
 				goto out_dump;
 			}
@@ -1931,27 +1939,29 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 0) {
 				ubifs_err("root inode %lu has non-zero (%d) "
 					  "direntries which refer it",
-					  fscki->inum, fscki->references);
+					  (unsigned long)fscki->inum,
+					  fscki->references);
 				goto out_dump;
 			}
 			if (fscki->calc_sz != fscki->size) {
 				ubifs_err("directory inode %lu size is %lld, "
 					  "but calculated size is %lld",
-					  fscki->inum, fscki->size,
-					  fscki->calc_sz);
+					  (unsigned long)fscki->inum,
+					  fscki->size, fscki->calc_sz);
 				goto out_dump;
 			}
 			if (fscki->calc_cnt != fscki->nlink) {
 				ubifs_err("directory inode %lu nlink is %d, "
 					  "but calculated nlink is %d",
-					  fscki->inum, fscki->nlink,
-					  fscki->calc_cnt);
+					  (unsigned long)fscki->inum,
+					  fscki->nlink, fscki->calc_cnt);
 				goto out_dump;
 			}
 		} else {
 			if (fscki->references != fscki->nlink) {
 				ubifs_err("inode %lu nlink is %d, but "
-					  "calculated nlink is %d", fscki->inum,
+					  "calculated nlink is %d",
+					  (unsigned long)fscki->inum,
 					  fscki->nlink, fscki->references);
 				goto out_dump;
 			}
@@ -1959,20 +1969,21 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 		if (fscki->xattr_sz != fscki->calc_xsz) {
 			ubifs_err("inode %lu has xattr size %u, but "
 				  "calculated size is %lld",
-				  fscki->inum, fscki->xattr_sz,
+				  (unsigned long)fscki->inum, fscki->xattr_sz,
 				  fscki->calc_xsz);
 			goto out_dump;
 		}
 		if (fscki->xattr_cnt != fscki->calc_xcnt) {
 			ubifs_err("inode %lu has %u xattrs, but "
-				  "calculated count is %lld", fscki->inum,
+				  "calculated count is %lld",
+				  (unsigned long)fscki->inum,
 				  fscki->xattr_cnt, fscki->calc_xcnt);
 			goto out_dump;
 		}
 		if (fscki->xattr_nms != fscki->calc_xnms) {
 			ubifs_err("inode %lu has xattr names' size %u, but "
 				  "calculated names' size is %lld",
-				  fscki->inum, fscki->xattr_nms,
+				  (unsigned long)fscki->inum, fscki->xattr_nms,
 				  fscki->calc_xnms);
 			goto out_dump;
 		}
@@ -1985,11 +1996,12 @@ out_dump:
 	ino_key_init(c, &key, fscki->inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", fscki->inum);
+		ubifs_err("inode %lu not found in index",
+			  (unsigned long)fscki->inum);
 		return -ENOENT;
 	} else if (err < 0) {
 		ubifs_err("error %d while looking up inode %lu",
-			  err, fscki->inum);
+			  err, (unsigned long)fscki->inum);
 		return err;
 	}
 
@@ -2007,7 +2019,7 @@ out_dump:
 	}
 
 	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
-		  fscki->inum, zbr->lnum, zbr->offs);
+		  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
 	dbg_dump_node(c, ino);
 	kfree(ino);
 	return -EINVAL;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 526c01e..0422c98 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -161,7 +161,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
 			return ERR_PTR(-EINVAL);
 		}
 		ubifs_warn("running out of inode numbers (current %lu, max %d)",
-			   c->highest_inum, INUM_WATERMARK);
+			   (unsigned long)c->highest_inum, INUM_WATERMARK);
 	}
 
 	inode->i_ino = ++c->highest_inum;
@@ -428,7 +428,8 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 		dbg_gen("feed '%s', ino %llu, new f_pos %#x",
 			dent->name, (unsigned long long)le64_to_cpu(dent->inum),
 			key_hash_flash(c, &dent->key));
-		ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum);
+		ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
+			     ubifs_inode(dir)->creat_sqnum);
 
 		nm.len = le16_to_cpu(dent->nlen);
 		over = filldir(dirent, dent->name, nm.len, file->f_pos,
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 51cf511..2624411 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		return err;
 	}
 
-	ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
+	ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
 
 	len = le32_to_cpu(dn->size);
 	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 			dn = bu->buf + (bu->zbranch[nn].offs - offs);
 
-			ubifs_assert(dn->ch.sqnum >
+			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
 				     ubifs_inode(inode)->creat_sqnum);
 
 			len = le32_to_cpu(dn->size);
@@ -691,32 +691,22 @@ out_err:
 /**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+			      struct page *page1)
 {
 	pgoff_t offset = page1->index, end_index;
 	struct address_space *mapping = page1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	struct bu_info *bu;
 	int err, page_idx, page_cnt, ret = 0, n = 0;
+	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
-	if (!bu)
-		return 0;
-
-	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
-	if (!bu->buf)
-		goto out_free;
-
-	data_key_init(c, &bu->key, inode->i_ino,
-		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
 		goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 		 * together. If all the pages were like this, bulk-read would
 		 * reduce performance, so we turn it off for a while.
 		 */
-		ui->read_in_a_row = 0;
-		ui->bulk_read = 0;
-		goto out_free;
+		goto out_bu_off;
 	}
 
 	if (bu->cnt) {
+		if (allocate) {
+			/*
+			 * Allocate bulk-read buffer depending on how many data
+			 * nodes we are going to read.
+			 */
+			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+				      bu->zbranch[bu->cnt - 1].len -
+				      bu->zbranch[0].offs;
+			ubifs_assert(bu->buf_len > 0);
+			ubifs_assert(bu->buf_len <= c->leb_size);
+			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+			if (!bu->buf)
+				goto out_bu_off;
+		}
+
 		err = ubifs_tnc_bulk_read(c, bu);
 		if (err)
 			goto out_warn;
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	ui->last_page_read = offset + page_idx - 1;
 
 out_free:
-	kfree(bu->buf);
-	kfree(bu);
+	if (allocate)
+		kfree(bu->buf);
 	return ret;
 
 out_warn:
 	ubifs_warn("ignoring error %d and skipping bulk-read", err);
 	goto out_free;
+
+out_bu_off:
+	ui->read_in_a_row = ui->bulk_read = 0;
+	goto out_free;
 }
 
 /**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
-	int ret = 0;
+	struct bu_info *bu;
+	int err = 0, allocated = 0;
 
 	ui->last_page_read = index;
-
 	if (!c->bulk_read)
 		return 0;
+
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
+
 	if (index != last_page_read + 1) {
 		/* Turn off bulk-read if we stop reading sequentially */
 		ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
 			ui->bulk_read = 0;
 		goto out_unlock;
 	}
+
 	if (!ui->bulk_read) {
 		ui->read_in_a_row += 1;
 		if (ui->read_in_a_row < 3)
@@ -829,10 +839,35 @@ static int ubifs_bulk_read(struct page *page)
 		/* Three reads in a row, so switch on bulk-read */
 		ui->bulk_read = 1;
 	}
-	ret = ubifs_do_bulk_read(c, page);
+
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
+
+	bu->buf_len = c->max_bu_buf_len;
+	data_key_init(c, &bu->key, inode->i_ino,
+		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+	err = ubifs_do_bulk_read(c, bu, page);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
+
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
-	return ret;
+	return err;
 }
 
 static int ubifs_readpage(struct file *file, struct page *page)
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 22993f8..f91b745 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -690,8 +690,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 	int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 
-	dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key),
-		key_block(c, key), len, DBGKEY(key));
+	dbg_jnl("ino %lu, blk %u, len %d, key %s",
+		(unsigned long)key_inum(c, key), key_block(c, key), len,
+		DBGKEY(key));
 	ubifs_assert(len <= UBIFS_BLOCK_SIZE);
 
 	data = kmalloc(dlen, GFP_NOFS);
@@ -1128,7 +1129,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 	ino_t inum = inode->i_ino;
 	unsigned int blk;
 
-	dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size);
+	dbg_jnl("ino %lu, size %lld -> %lld",
+		(unsigned long)inum, old_size, new_size);
 	ubifs_assert(!ui->data_len);
 	ubifs_assert(S_ISREG(inode->i_mode));
 	ubifs_assert(mutex_is_locked(&ui->ui_mutex));
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 9ee6508..3f1f16b 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -345,7 +345,7 @@ static inline int key_type_flash(const struct ubifs_info *c, const void *k)
 {
 	const union ubifs_key *key = k;
 
-	return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
+	return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
 }
 
 /**
@@ -416,7 +416,7 @@ static inline unsigned int key_block_flash(const struct ubifs_info *c,
 {
 	const union ubifs_key *key = k;
 
-	return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK;
+	return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
 }
 
 /**
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index eed5a00..a41434b 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -571,8 +571,6 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
 		/* We assume here that LEB zero is never an LPT LEB */
 		if (nnode->nbranch[iip].lnum)
 			return ubifs_get_pnode(c, nnode, iip);
-		else
-			return NULL;
 	}
 
 	/* Go up while can't go right */
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 02d3462..9bd5a43 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -105,7 +105,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
 	list_add_tail(&orphan->list, &c->orph_list);
 	list_add_tail(&orphan->new_list, &c->orph_new);
 	spin_unlock(&c->orphan_lock);
-	dbg_gen("ino %lu", inum);
+	dbg_gen("ino %lu", (unsigned long)inum);
 	return 0;
 }
 
@@ -132,14 +132,16 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 		else {
 			if (o->dnext) {
 				spin_unlock(&c->orphan_lock);
-				dbg_gen("deleted twice ino %lu", inum);
+				dbg_gen("deleted twice ino %lu",
+					(unsigned long)inum);
 				return;
 			}
 			if (o->cnext) {
 				o->dnext = c->orph_dnext;
 				c->orph_dnext = o;
 				spin_unlock(&c->orphan_lock);
-				dbg_gen("delete later ino %lu", inum);
+				dbg_gen("delete later ino %lu",
+					(unsigned long)inum);
 				return;
 			}
 			rb_erase(p, &c->orph_tree);
@@ -151,12 +153,12 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 			}
 			spin_unlock(&c->orphan_lock);
 			kfree(o);
-			dbg_gen("inum %lu", inum);
+			dbg_gen("inum %lu", (unsigned long)inum);
 			return;
 		}
 	}
 	spin_unlock(&c->orphan_lock);
-	dbg_err("missing orphan ino %lu", inum);
+	dbg_err("missing orphan ino %lu", (unsigned long)inum);
 	dbg_dump_stack();
 }
 
@@ -448,7 +450,7 @@ static void erase_deleted(struct ubifs_info *c)
 		rb_erase(&orphan->rb, &c->orph_tree);
 		list_del(&orphan->list);
 		c->tot_orphans -= 1;
-		dbg_gen("deleting orphan ino %lu", orphan->inum);
+		dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum);
 		kfree(orphan);
 	}
 	c->orph_dnext = NULL;
@@ -536,8 +538,8 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
 	list_add_tail(&orphan->list, &c->orph_list);
 	orphan->dnext = c->orph_dnext;
 	c->orph_dnext = orphan;
-	dbg_mnt("ino %lu, new %d, tot %d",
-		inum, c->new_orphans, c->tot_orphans);
+	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+		c->new_orphans, c->tot_orphans);
 	return 0;
 }
 
@@ -609,7 +611,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 		n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
 		for (i = 0; i < n; i++) {
 			inum = le64_to_cpu(orph->inos[i]);
-			dbg_rcvry("deleting orphaned inode %lu", inum);
+			dbg_rcvry("deleting orphaned inode %lu",
+				  (unsigned long)inum);
 			err = ubifs_tnc_remove_ino(c, inum);
 			if (err)
 				return err;
@@ -840,8 +843,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 	if (inum != ci->last_ino) {
 		/* Lowest node type is the inode node, so it comes first */
 		if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
-			ubifs_err("found orphan node ino %lu, type %d", inum,
-				  key_type(c, &zbr->key));
+			ubifs_err("found orphan node ino %lu, type %d",
+				  (unsigned long)inum, key_type(c, &zbr->key));
 		ci->last_ino = inum;
 		ci->tot_inos += 1;
 		err = ubifs_tnc_read_node(c, zbr, ci->node);
@@ -853,7 +856,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		/* Must be recorded as an orphan */
 		if (!dbg_find_check_orphan(&ci->root, inum) &&
 		    !dbg_find_orphan(c, inum)) {
-			ubifs_err("missing orphan, ino %lu", inum);
+			ubifs_err("missing orphan, ino %lu",
+				  (unsigned long)inum);
 			ci->missing += 1;
 		}
 	}
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 77d26c1..90acac6 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -168,12 +168,12 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
 				struct ubifs_mst_node *mst)
 {
 	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
-	uint32_t save_flags;
+	__le32 save_flags;
 
 	dbg_rcvry("recovery");
 
 	save_flags = mst->flags;
-	mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY);
+	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
 
 	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
 	err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
@@ -1435,13 +1435,13 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
 	err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
 	if (err)
 		goto out;
-	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs,
-		  i_size, e->d_size);
+	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ",
+		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
 	return 0;
 
 out:
 	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
-		   e->inum, e->i_size, e->d_size, err);
+		   (unsigned long)e->inum, e->i_size, e->d_size, err);
 	return err;
 }
 
@@ -1472,7 +1472,8 @@ int ubifs_recover_size(struct ubifs_info *c)
 				return err;
 			if (err == -ENOENT) {
 				/* Remove data nodes that have no inode */
-				dbg_rcvry("removing ino %lu", e->inum);
+				dbg_rcvry("removing ino %lu",
+					  (unsigned long)e->inum);
 				err = ubifs_tnc_remove_ino(c, e->inum);
 				if (err)
 					return err;
@@ -1493,8 +1494,8 @@ int ubifs_recover_size(struct ubifs_info *c)
 				return PTR_ERR(inode);
 			if (inode->i_size < e->d_size) {
 				dbg_rcvry("ino %lu size %lld -> %lld",
-					  e->inum, e->d_size,
-					  inode->i_size);
+					  (unsigned long)e->inum,
+					  e->d_size, inode->i_size);
 				inode->i_size = e->d_size;
 				ubifs_inode(inode)->ui_size = e->d_size;
 				e->inode = inode;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 7399692..21f7d04 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1065,7 +1065,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
 	ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
 	dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
 		"highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
-		c->highest_inum);
+		(unsigned long)c->highest_inum);
 out:
 	destroy_replay_tree(c);
 	destroy_bud_list(c);
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 2bf753b..0f39235 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -81,6 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
 	int min_leb_cnt = UBIFS_MIN_LEB_CNT;
 	uint64_t tmp64, main_bytes;
+	__le64 tmp_le64;
 
 	/* Some functions called from here depend on the @c->key_len filed */
 	c->key_len = UBIFS_SK_LEN;
@@ -295,10 +296,10 @@ static int create_default_filesystem(struct ubifs_info *c)
 	ino->ch.node_type = UBIFS_INO_NODE;
 	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
 	ino->nlink = cpu_to_le32(2);
-	tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
-	ino->atime_sec = tmp;
-	ino->ctime_sec = tmp;
-	ino->mtime_sec = tmp;
+	tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
+	ino->atime_sec = tmp_le64;
+	ino->ctime_sec = tmp_le64;
+	ino->mtime_sec = tmp_le64;
 	ino->atime_nsec = 0;
 	ino->ctime_nsec = 0;
 	ino->mtime_nsec = 0;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8780efb..d80b2ae 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -36,6 +36,12 @@
 #include <linux/mount.h>
 #include "ubifs.h"
 
+/*
+ * Maximum amount of memory we may 'kmalloc()' without worrying that we are
+ * allocating too much.
+ */
+#define UBIFS_KMALLOC_OK (128*1024)
+
 /* Slab cache for UBIFS inodes */
 struct kmem_cache *ubifs_inode_slab;
 
@@ -561,18 +567,11 @@ static int init_constants_early(struct ubifs_info *c)
 	 * calculations when reporting free space.
 	 */
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
-	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > 128 * 1024) {
-		/* Check if we can kmalloc more than 128KiB */
-		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
-		kfree(try);
-		if (!try)
-			c->bulk_read_buf_size = 128 * 1024;
-	}
+	/* Buffer size for bulk-reads */
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
 	return 0;
 }
 
@@ -992,6 +991,34 @@ static void destroy_journal(struct ubifs_info *c)
 }
 
 /**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+	ubifs_assert(c->bulk_read == 1);
+
+	if (c->bu.buf)
+		return; /* Already initialized */
+
+again:
+	c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!c->bu.buf) {
+		if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+			goto again;
+		}
+
+		/* Just disable bulk-read */
+		ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+			   "disabling it", c->max_bu_buf_len);
+		c->mount_opts.bulk_read = 1;
+		c->bulk_read = 0;
+		return;
+	}
+}
+
+/**
 * mount_ubifs - mount UBIFS file-system.
 * @c: UBIFS file-system description object
 *
@@ -1059,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c)
 		goto out_free;
 	}
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+
+	/*
+	 * We have to check all CRCs, even for data nodes, when we mount the FS
+	 * (specifically, when we are replaying).
+	 */
 	c->always_chk_crc = 1;
 
 	err = ubifs_read_superblock(c);
@@ -1289,6 +1323,7 @@ out_cbuf:
 out_dereg:
 	dbg_failure_mode_deregistration(c);
 out_free:
+	kfree(c->bu.buf);
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
@@ -1325,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c)
 	kfree(c->cbuf);
 	kfree(c->rcvrd_mst_node);
 	kfree(c->mst_node);
+	kfree(c->bu.buf);
+	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
 	UBIFS_DBG(vfree(c->dbg_buf));
-	vfree(c->ileb_buf);
 	dbg_failure_mode_deregistration(c);
 }
 
@@ -1626,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 		ubifs_err("invalid or unknown remount parameter");
 		return err;
 	}
+
 	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 		err = ubifs_remount_rw(c);
 		if (err)
@@ -1633,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
 		ubifs_remount_ro(c);
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+	else {
+		dbg_gen("disable bulk-read");
+		kfree(c->bu.buf);
+		c->bu.buf = NULL;
+	}
+
 	return 0;
 }
 
@@ -1723,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&c->log_mutex);
 	mutex_init(&c->mst_mutex);
 	mutex_init(&c->umount_mutex);
+	mutex_init(&c->bu_mutex);
 	init_waitqueue_head(&c->cmt_wq);
 	c->buds = RB_ROOT;
 	c->old_idx = RB_ROOT;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index d27fd91..6eef534 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1501,7 +1501,12 @@ out:
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
- * consecutively in the same LEB.
+ * consecutively in the same LEB. This function returns zero in case of success
+ * and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
+ * maxumum possible amount of nodes for bulk-read.
 */
 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
 {
@@ -2677,7 +2682,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 	struct ubifs_dent_node *xent, *pxent = NULL;
 	struct qstr nm = { .name = NULL };
 
-	dbg_tnc("ino %lu", inum);
+	dbg_tnc("ino %lu", (unsigned long)inum);
 
 	/*
 	 * Walk all extended attribute entries and remove them together with
@@ -2697,7 +2702,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 		}
 
 		xattr_inum = le64_to_cpu(xent->inum);
-		dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum);
+		dbg_tnc("xent '%s', ino %lu", xent->name,
+			(unsigned long)xattr_inum);
 
 		nm.name = xent->name;
 		nm.len = le16_to_cpu(xent->nlen);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a7bd32f..46b1725 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -753,7 +753,7 @@ struct ubifs_znode {
 };
 
 /**
- * struct bu_info - bulk-read information
+ * struct bu_info - bulk-read information.
 * @key: first data node key
 * @zbranch: zbranches of data nodes to bulk read
 * @buf: buffer to read into
@@ -969,7 +969,10 @@ struct ubifs_mount_opts {
 * @mst_node: master node
 * @mst_offs: offset of valid master node
 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
- * @bulk_read_buf_size: buffer size for bulk-reads
+ *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
 *
 * @log_lebs: number of logical eraseblocks in the log
 * @log_bytes: log size in bytes
@@ -1217,7 +1220,10 @@ struct ubifs_info {
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
-	int bulk_read_buf_size;
+
+	int max_bu_buf_len;
+	struct mutex bu_mutex;
+	struct bu_info bu;
 
 	int log_lebs;
 	long long log_bytes;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6e74b11..30ebde4 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -106,6 +106,7 @@ void udf_clear_inode(struct inode *inode)
 		udf_truncate_tail_extent(inode);
 		unlock_kernel();
 		write_inode_now(inode, 0);
+		invalidate_inode_buffers(inode);
 	}
 	iinfo = UDF_I(inode);
 	kfree(iinfo->i_ext.i_data);