author     Richard Weinberger <richard@nod.at>    2018-07-12 13:01:57 +0200
committer  Richard Weinberger <richard@nod.at>    2018-08-15 00:25:21 +0200
commit     6eb61d587f4515e4be5669eff383c0185009954f
tree       a5d3183e104338319152a888e0ff980c2546e6e1 /fs/ubifs/file.c
parent     54169ddd382d461f7c01cc5a5182a4b4bc539489
ubifs: Pass struct ubifs_info to ubifs_assert()
This allows us to have more context in ubifs_assert()
and take different actions depending on the configuration.
Signed-off-by: Richard Weinberger <richard@nod.at>
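For context, the assertion now takes the filesystem instance as its first argument. The macro definition itself lives in fs/ubifs/ubifs.h and is not part of this diff; what follows is only a minimal sketch of what a context-aware assertion could look like, assuming the usual ubifs.h includes, an "ubi%d:%d" identification taken from c->vi, and a plain dump_stack() fallback (the per-configuration actions mentioned in the commit message are left to follow-up patches).

```c
/*
 * Illustrative sketch only, not the actual UBIFS definition. With the
 * ubifs_info at hand, a failed assertion can report which UBI volume
 * the affected filesystem lives on, and later changes can choose a
 * different action (report, switch read-only, ...) per configuration.
 */
#define ubifs_assert(c, expr) do {                                        \
	if (unlikely(!(expr))) {                                          \
		pr_crit("UBIFS assert failed: %s, in %s:%d (ubi%d:%d)\n", \
			#expr, __func__, __LINE__,                        \
			(c)->vi.ubi_num, (c)->vi.vol_id);                 \
		dump_stack();                                             \
	}                                                                 \
} while (0)
```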
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r-- | fs/ubifs/file.c | 47
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 745b3f8..1b78f2e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -71,7 +71,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		return err;
 	}
 
-	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
 		     ubifs_inode(inode)->creat_sqnum);
 	len = le32_to_cpu(dn->size);
 	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -115,12 +115,13 @@ static int do_readpage(struct page *page)
 	unsigned int block, beyond;
 	struct ubifs_data_node *dn;
 	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	loff_t i_size = i_size_read(inode);
 
 	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
 		inode->i_ino, page->index, i_size, page->flags);
-	ubifs_assert(!PageChecked(page));
-	ubifs_assert(!PagePrivate(page));
+	ubifs_assert(c, !PageChecked(page));
+	ubifs_assert(c, !PagePrivate(page));
 
 	addr = kmap(page);
 
@@ -441,8 +442,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 	int skipped_read = 0;
 	struct page *page;
 
-	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
-	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
+	ubifs_assert(c, !c->ro_media && !c->ro_mount);
 
 	if (unlikely(c->ro_error))
 		return -EROFS;
@@ -481,7 +482,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 
 	err = allocate_budget(c, page, ui, appending);
 	if (unlikely(err)) {
-		ubifs_assert(err == -ENOSPC);
+		ubifs_assert(c, err == -ENOSPC);
 		/*
 		 * If we skipped reading the page because we were going to
 		 * write all of it, then it is not up to date.
@@ -498,7 +499,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 		 * everything and fall-back to slow-path.
 		 */
 		if (appending) {
-			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
 			mutex_unlock(&ui->ui_mutex);
 		}
 		unlock_page(page);
@@ -595,7 +596,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 		 * '__set_page_dirty_nobuffers()'.
 		 */
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
 		mutex_unlock(&ui->ui_mutex);
 	}
 
@@ -648,7 +649,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 		dn = bu->buf + (bu->zbranch[nn].offs - offs);
 
-		ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+		ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
 			     ubifs_inode(inode)->creat_sqnum);
 
 		len = le32_to_cpu(dn->size);
@@ -767,8 +768,8 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 	bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
 		      bu->zbranch[bu->cnt - 1].len -
 		      bu->zbranch[0].offs;
-	ubifs_assert(bu->buf_len > 0);
-	ubifs_assert(bu->buf_len <= c->leb_size);
+	ubifs_assert(c, bu->buf_len > 0);
+	ubifs_assert(c, bu->buf_len <= c->leb_size);
 	bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
 	if (!bu->buf)
 		goto out_bu_off;
@@ -920,7 +921,7 @@ static int do_writepage(struct page *page, int len)
 #ifdef UBIFS_DEBUG
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	spin_lock(&ui->ui_lock);
-	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
+	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
 	spin_unlock(&ui->ui_lock);
 #endif
 
@@ -949,7 +950,7 @@ static int do_writepage(struct page *page, int len)
 		ubifs_ro_mode(c, err);
 	}
 
-	ubifs_assert(PagePrivate(page));
+	ubifs_assert(c, PagePrivate(page));
 	if (PageChecked(page))
 		release_new_page_budget(c);
 	else
@@ -1014,6 +1015,7 @@ static int do_writepage(struct page *page, int len)
 static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size = i_size_read(inode), synced_i_size;
 	pgoff_t end_index = i_size >> PAGE_SHIFT;
@@ -1022,7 +1024,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 
 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
 		inode->i_ino, page->index, page->flags);
-	ubifs_assert(PagePrivate(page));
+	ubifs_assert(c, PagePrivate(page));
 
 	/* Is the page fully outside @i_size? (truncate in progress) */
 	if (page->index > end_index || (page->index == end_index && !len)) {
@@ -1167,7 +1169,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 				 * 'ubifs_jnl_truncate()' will see an already
 				 * truncated (and up to date) data node.
 				 */
-				ubifs_assert(PagePrivate(page));
+				ubifs_assert(c, PagePrivate(page));
 
 				clear_page_dirty_for_io(page);
 				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
@@ -1303,7 +1305,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
 	struct inode *inode = page->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
-	ubifs_assert(PagePrivate(page));
+	ubifs_assert(c, PagePrivate(page));
 	if (offset || length < PAGE_SIZE)
 		/* Partial page remains dirty */
 		return;
@@ -1461,13 +1463,15 @@ static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 static int ubifs_set_page_dirty(struct page *page)
 {
 	int ret;
+	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 	ret = __set_page_dirty_nobuffers(page);
 	/*
 	 * An attempt to dirty a page without budgeting for it - should not
 	 * happen.
 	 */
-	ubifs_assert(ret == 0);
+	ubifs_assert(c, ret == 0);
 	return ret;
 }
 
@@ -1496,14 +1500,17 @@ static int ubifs_migrate_page(struct address_space *mapping,
 
 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
+	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+
 	/*
 	 * An attempt to release a dirty page without budgeting for it - should
 	 * not happen.
	 */
 	if (PageWriteback(page))
 		return 0;
-	ubifs_assert(PagePrivate(page));
-	ubifs_assert(0);
+	ubifs_assert(c, PagePrivate(page));
+	ubifs_assert(c, 0);
 	ClearPagePrivate(page);
 	ClearPageChecked(page);
 	return 1;
@@ -1524,7 +1531,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 
 	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
 		i_size_read(inode));
-	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(c, !c->ro_media && !c->ro_mount);
 
 	if (unlikely(c->ro_error))
 		return VM_FAULT_SIGBUS; /* -EROFS */
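Note how the hunks obtain the context in address-space callbacks that only receive a struct page or struct file: the patch open-codes inode->i_sb->s_fs_info at each call site. Purely for illustration, the lookup chain could be written as the helpers below; these helper names are hypothetical and are not added by the patch.

```c
#include <linux/fs.h>
#include <linux/mm_types.h>

struct ubifs_info;	/* defined in fs/ubifs/ubifs.h */

/* Hypothetical helpers; the patch open-codes these lookups instead. */
static inline struct ubifs_info *ubifs_inode_to_info(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;	/* set when the filesystem is mounted */
}

static inline struct ubifs_info *ubifs_page_to_info(const struct page *page)
{
	return ubifs_inode_to_info(page->mapping->host);
}
```

Either way, the lookup is a couple of pointer dereferences per call site, which keeps the converted assertions one line each while giving ubifs_assert() a handle on the mounted filesystem.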