Diffstat (limited to 'fs')
 fs/Kconfig                     |   4
 fs/afs/file.c                  |   2
 fs/afs/fsclient.c              |   8
 fs/afs/mntpt.c                 |   4
 fs/aio.c                       |  30
 fs/bio-integrity.c             |  10
 fs/btrfs/check-integrity.c     |   1
 fs/btrfs/compression.c         |  12
 fs/btrfs/extent_io.c           |  16
 fs/btrfs/file-item.c           |   4
 fs/btrfs/free-space-cache.c    |   2
 fs/btrfs/inode.c               |  26
 fs/btrfs/lzo.c                 |   4
 fs/btrfs/scrub.c               |   8
 fs/btrfs/zlib.c                |   4
 fs/cifs/cifsacl.c              |   1
 fs/compat_ioctl.c              |   2
 fs/cramfs/inode.c              |   6
 fs/dcache.c                    |  29
 fs/debugfs/inode.c             | 149
 fs/devpts/inode.c              |  85
 fs/dlm/lowcomms.c              |  22
 fs/exec.c                      |  18
 fs/exofs/dir.c                 |   4
 fs/ext2/dir.c                  |   4
 fs/fs-writeback.c              |   2
 fs/fuse/dev.c                  |   4
 fs/fuse/file.c                 |   4
 fs/gfs2/aops.c                 |  12
 fs/gfs2/lops.c                 |   8
 fs/gfs2/quota.c                |   4
 fs/jbd/journal.c               |  14
 fs/jbd/transaction.c           |   4
 fs/jbd2/commit.c               |   4
 fs/jbd2/journal.c              |  14
 fs/jbd2/transaction.c          |   4
 fs/jffs2/compr.c               |   2
 fs/logfs/dir.c                 |  18
 fs/logfs/readwrite.c           |  38
 fs/logfs/segment.c             |   4
 fs/minix/dir.c                 |   4
 fs/namei.c                     | 126
 fs/nfs/client.c                |   1
 fs/nfs/dir.c                   |   8
 fs/nfs/idmap.c                 |   1
 fs/nfs/nfs4proc.c              |   4
 fs/nilfs2/cpfile.c             |  94
 fs/nilfs2/dat.c                |  38
 fs/nilfs2/dir.c                |   4
 fs/nilfs2/ifile.c              |   4
 fs/nilfs2/mdt.c                |   4
 fs/nilfs2/page.c               |   8
 fs/nilfs2/recovery.c           |   4
 fs/nilfs2/segbuf.c             |   4
 fs/nilfs2/sufile.c             |  68
 fs/ntfs/aops.c                 |  20
 fs/ntfs/attrib.c               |  20
 fs/ntfs/file.c                 |  16
 fs/ntfs/layout.h               |   4
 fs/ntfs/super.c                |   8
 fs/ocfs2/aops.c                |  16
 fs/pipe.c                      |   8
 fs/proc/base.c                 |   3
 fs/proc/kcore.c                |   2
 fs/proc/proc_sysctl.c          |   2
 fs/quota/dquot.c               |   1
 fs/reiserfs/lbalance.c         |   2
 fs/reiserfs/stree.c            |   4
 fs/reiserfs/tail_conversion.c  |   4
 fs/splice.c                    |   7
 fs/squashfs/file.c             |   8
 fs/squashfs/symlink.c          |   4
 fs/super.c                     |   1
 fs/sysfs/dir.c                 | 224
 fs/sysfs/inode.c               |  11
 fs/sysfs/mount.c               |   2
 fs/sysfs/sysfs.h               |  17
 fs/ubifs/file.c                |   4
 fs/udf/file.c                  |   4
 79 files changed, 873 insertions(+), 482 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index 1497ddf..f95ae3a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -4,6 +4,10 @@
menu "File systems"
+# Use unaligned word dcache accesses
+config DCACHE_WORD_ACCESS
+ bool
+
if BLOCK
source "fs/ext2/Kconfig"
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 14d89fa..8f6e923 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -251,7 +251,7 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
ASSERT(key != NULL);
vnode = AFS_FS_I(mapping->host);
- if (vnode->flags & AFS_VNODE_DELETED) {
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
_leave(" = -ESTALE");
return -ESTALE;
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 2f213d1..b960ff0 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -365,10 +365,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
_debug("extract data");
if (call->count > 0) {
page = call->reply3;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
ret = afs_extract_data(call, skb, last, buffer,
call->count);
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
@@ -411,9 +411,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
if (call->count < PAGE_SIZE) {
_debug("clear");
page = call->reply3;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
}
_leave(" = 0 [done]");
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 8f4ce26..298cf89 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -200,9 +200,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
if (PageError(page))
goto error;
- buf = kmap_atomic(page, KM_USER0);
+ buf = kmap_atomic(page);
memcpy(devname, buf, size);
- kunmap_atomic(buf, KM_USER0);
+ kunmap_atomic(buf);
page_cache_release(page);
page = NULL;
}
diff --git a/fs/aio.c b/fs/aio.c
index 59b431a..c7acaf3 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
info->nr = nr_events; /* trusted copy */
- ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+ ring = kmap_atomic(info->ring_pages[0]);
ring->nr = nr_events; /* user copy */
ring->id = ctx->user_id;
ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
- kunmap_atomic(ring, KM_USER0);
+ kunmap_atomic(ring);
return 0;
}
/* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
+ * kmap_atomic(). Release the pointer with put_aio_ring_event();
*/
#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
-#define aio_ring_event(info, nr, km) ({ \
+#define aio_ring_event(info, nr) ({ \
unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
struct io_event *__event; \
__event = kmap_atomic( \
- (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+ (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
__event += pos % AIO_EVENTS_PER_PAGE; \
__event; \
})
-#define put_aio_ring_event(event, km) do { \
+#define put_aio_ring_event(event) do { \
struct io_event *__event = (event); \
(void)__event; \
- kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+ kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while(0)
static void ctx_rcu_free(struct rcu_head *head)
@@ -998,10 +998,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
if (kiocbIsCancelled(iocb))
goto put_rq;
- ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+ ring = kmap_atomic(info->ring_pages[0]);
tail = info->tail;
- event = aio_ring_event(info, tail, KM_IRQ0);
+ event = aio_ring_event(info, tail);
if (++tail >= info->nr)
tail = 0;
@@ -1022,8 +1022,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
info->tail = tail;
ring->tail = tail;
- put_aio_ring_event(event, KM_IRQ0);
- kunmap_atomic(ring, KM_IRQ1);
+ put_aio_ring_event(event);
+ kunmap_atomic(ring);
pr_debug("added to ring %p at [%lu]\n", iocb, tail);
@@ -1068,7 +1068,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
unsigned long head;
int ret = 0;
- ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+ ring = kmap_atomic(info->ring_pages[0]);
dprintk("in aio_read_evt h%lu t%lu m%lu\n",
(unsigned long)ring->head, (unsigned long)ring->tail,
(unsigned long)ring->nr);
@@ -1080,18 +1080,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
head = ring->head % info->nr;
if (head != ring->tail) {
- struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+ struct io_event *evp = aio_ring_event(info, head);
*ent = *evp;
head = (head + 1) % info->nr;
smp_mb(); /* finish reading the event before updatng the head */
ring->head = head;
ret = 1;
- put_aio_ring_event(evp, KM_USER1);
+ put_aio_ring_event(evp);
}
spin_unlock(&info->ring_lock);
out:
- kunmap_atomic(ring, KM_USER0);
+ kunmap_atomic(ring);
dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
(unsigned long)ring->head, (unsigned long)ring->tail);
return ret;
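With the slot argument gone, the aio ring accessors reduce to a simple map/use/unmap pair, where the nested mapping taken by aio_ring_event() must be dropped before the outer one. A hedged sketch of the pattern aio_complete() follows (a fragment; names taken from the hunks above):

	ring = kmap_atomic(info->ring_pages[0]);
	event = aio_ring_event(info, tail);	/* nested atomic kmap */
	event->res = res;			/* fill in the completion */
	put_aio_ring_event(event);		/* unmap the inner mapping first */
	kunmap_atomic(ring);			/* then the outer one */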
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index c2183f3..e85c04b 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+ void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf;
@@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
}
@@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+ void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf;
@@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
ret = bi->verify_fn(&bix);
if (ret) {
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return ret;
}
@@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
return ret;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index d986824..c053e90 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -89,7 +89,6 @@
#include "disk-io.h"
#include "transaction.h"
#include "extent_io.h"
-#include "disk-io.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d02c27c..b805afb 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
page = cb->compressed_pages[i];
csum = ~(u32)0;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
btrfs_csum_final(csum, (char *)&csum);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (csum != *cb_sum) {
printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (zero_offset) {
int zeros;
zeros = PAGE_CACHE_SIZE - zero_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, zeros);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
}
}
@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
bytes = min(PAGE_CACHE_SIZE - *pg_offset,
PAGE_CACHE_SIZE - buf_offset);
bytes = min(bytes, working_bytes);
- kaddr = kmap_atomic(page_out, KM_USER0);
+ kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
flush_dcache_page(page_out);
*pg_offset += bytes;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a55fbe6..2862454 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
if (zero_offset) {
iosize = PAGE_CACHE_SIZE - zero_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
}
}
while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct extent_state *cached = NULL;
iosize = PAGE_CACHE_SIZE - pg_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
char *userpage;
struct extent_state *cached = NULL;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
if (page->index == end_index) {
char *userpage;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0,
PAGE_CACHE_SIZE - pg_offset);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
flush_dcache_page(page);
}
pg_offset = 0;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index c7fb3a4..078b4fd 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums->bytenr = ordered->start;
}
- data = kmap_atomic(bvec->bv_page, KM_USER0);
+ data = kmap_atomic(bvec->bv_page);
sector_sum->sum = ~(u32)0;
sector_sum->sum = btrfs_csum_data(root,
data + bvec->bv_offset,
sector_sum->sum,
bvec->bv_len);
- kunmap_atomic(data, KM_USER0);
+ kunmap_atomic(data);
btrfs_csum_final(sector_sum->sum,
(char *)&sector_sum->sum);
sector_sum->bytenr = disk_bytenr;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 710ea38..b02e379 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1068,7 +1068,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
spin_unlock(&block_group->lock);
ret = 0;
#ifdef DEBUG
- printk(KERN_ERR "btrfs: failed to write free space cace "
+ printk(KERN_ERR "btrfs: failed to write free space cache "
"for block group %llu\n", block_group->key.objectid);
#endif
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 892b347..3a0b5c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
- kaddr = kmap_atomic(cpage, KM_USER0);
+ kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
i++;
ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ again:
* sending it down to disk
*/
if (offset) {
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
will_compress = 1;
}
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
} else {
ret = get_state_private(io_tree, start, &private);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (ret)
goto zeroit;
@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
if (csum != private)
goto zeroit;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
good:
return 0;
@@ -1894,7 +1894,7 @@ zeroit:
(unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (private == 0)
return 0;
return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
if (ret) {
- char *kaddr = kmap_atomic(page, KM_USER0);
+ char *kaddr = kmap_atomic(page);
unsigned long copy_size = min_t(u64,
PAGE_CACHE_SIZE - pg_offset,
max_size - extent_offset);
memset(kaddr + pg_offset, 0, copy_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
kfree(tmp);
return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
unsigned long flags;
local_irq_save(flags);
- kaddr = kmap_atomic(page, KM_IRQ0);
+ kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
csum, bvec->bv_len);
btrfs_csum_final(csum, (char *)&csum);
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a178f5e..743b86f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
bytes = min_t(unsigned long, destlen, out_len - start_byte);
- kaddr = kmap_atomic(dest_page, KM_USER0);
+ kaddr = kmap_atomic(dest_page);
memcpy(kaddr, workspace->buf + start_byte, bytes);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
out:
return ret;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index abc0fbf..390e710 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
u64 flags = sbio->spag[ix].flags;
page = sbio->bio->bi_io_vec[ix].bv_page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
if (flags & BTRFS_EXTENT_FLAG_DATA) {
ret = scrub_checksum_data(sbio->sdev,
sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
} else {
WARN_ON(1);
}
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
return ret;
}
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
}
for (i = 0; i < sbio->count; ++i) {
page = sbio->bio->bi_io_vec[i].bv_page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
flags = sbio->spag[i].flags;
logical = sbio->logical + i * PAGE_SIZE;
ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
} else {
WARN_ON(1);
}
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
if (ret) {
ret = scrub_recheck_error(sbio, i);
if (!ret) {
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index faccd47..92c2065 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
PAGE_CACHE_SIZE - buf_offset);
bytes = min(bytes, bytes_left);
- kaddr = kmap_atomic(dest_page, KM_USER0);
+ kaddr = kmap_atomic(dest_page);
memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
pg_offset += bytes;
bytes_left -= bytes;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c1b2544..3cc1b25 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -556,6 +556,7 @@ init_cifs_idmap(void)
/* instruct request_key() to use this special keyring as a cache for
* the results it looks up */
+ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
cred->thread_keyring = keyring;
cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
root_cred = cred;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a26bea1..10d8cd9 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -34,7 +34,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/auto_fs.h>
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 853480d..d013c46 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -257,10 +257,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
/* Do sanity checks on the superblock */
if (super.magic != CRAMFS_MAGIC) {
- /* check for wrong endianess */
+ /* check for wrong endianness */
if (super.magic == CRAMFS_MAGIC_WEND) {
if (!silent)
- printk(KERN_ERR "cramfs: wrong endianess\n");
+ printk(KERN_ERR "cramfs: wrong endianness\n");
goto out;
}
@@ -270,7 +270,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
mutex_unlock(&read_mutex);
if (super.magic != CRAMFS_MAGIC) {
if (super.magic == CRAMFS_MAGIC_WEND && !silent)
- printk(KERN_ERR "cramfs: wrong endianess\n");
+ printk(KERN_ERR "cramfs: wrong endianness\n");
else if (!silent)
printk(KERN_ERR "cramfs: wrong magic\n");
goto out;
diff --git a/fs/dcache.c b/fs/dcache.c
index a78e145..e441941c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
- unsigned long hash)
+ unsigned int hash)
{
- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+ hash += (unsigned long) parent / L1_CACHE_BYTES;
+ hash = hash + (hash >> D_HASHBITS);
return dentry_hashtable + (hash & D_HASHMASK);
}
@@ -144,6 +144,28 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
static inline int dentry_cmp(const unsigned char *cs, size_t scount,
const unsigned char *ct, size_t tcount)
{
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ unsigned long a,b,mask;
+
+ if (unlikely(scount != tcount))
+ return 1;
+
+ for (;;) {
+ a = *(unsigned long *)cs;
+ b = *(unsigned long *)ct;
+ if (tcount < sizeof(unsigned long))
+ break;
+ if (unlikely(a != b))
+ return 1;
+ cs += sizeof(unsigned long);
+ ct += sizeof(unsigned long);
+ tcount -= sizeof(unsigned long);
+ if (!tcount)
+ return 0;
+ }
+ mask = ~(~0ul << tcount*8);
+ return unlikely(!!((a ^ b) & mask));
+#else
if (scount != tcount)
return 1;
@@ -155,6 +177,7 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
tcount--;
} while (tcount);
return 0;
+#endif
}
static void __d_free(struct rcu_head *head)
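The word-at-a-time comparison relies on the little-endian tail mask ~(~0ul << tcount*8), which keeps exactly the low tcount bytes of the final partial word so that whatever follows the name in memory is ignored. A quick user-space check of the idea (a standalone sketch assuming a little-endian machine, as CONFIG_DCACHE_WORD_ACCESS requires):

	#include <stdio.h>

	int main(void)
	{
		unsigned long a = 0x0000000041424344ul;	/* bytes "DCBA" + NULs */
		unsigned long b = 0x00000000ff424344ul;	/* differs in byte 3 */
		unsigned long mask = ~(~0ul << 3 * 8);	/* keep the low 3 bytes */

		/* Bytes past the compared length are masked off: prints 1. */
		printf("%d\n", ((a ^ b) & mask) == 0);
		return 0;
	}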
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 956d5dd..b80bc84 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -23,9 +23,13 @@
#include <linux/debugfs.h>
#include <linux/fsnotify.h>
#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
+#define DEBUGFS_DEFAULT_MODE 0755
+
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
@@ -125,11 +129,154 @@ static inline int debugfs_positive(struct dentry *dentry)
return dentry->d_inode && !d_unhashed(dentry);
}
+struct debugfs_mount_opts {
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+};
+
+enum {
+ Opt_uid,
+ Opt_gid,
+ Opt_mode,
+ Opt_err
+};
+
+static const match_table_t tokens = {
+ {Opt_uid, "uid=%u"},
+ {Opt_gid, "gid=%u"},
+ {Opt_mode, "mode=%o"},
+ {Opt_err, NULL}
+};
+
+struct debugfs_fs_info {
+ struct debugfs_mount_opts mount_opts;
+};
+
+static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ int token;
+ char *p;
+
+ opts->mode = DEBUGFS_DEFAULT_MODE;
+
+ while ((p = strsep(&data, ",")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_uid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ opts->uid = option;
+ break;
+ case Opt_gid:
+ if (match_octal(&args[0], &option))
+ return -EINVAL;
+ opts->gid = option;
+ break;
+ case Opt_mode:
+ if (match_octal(&args[0], &option))
+ return -EINVAL;
+ opts->mode = option & S_IALLUGO;
+ break;
+ /*
+ * We might like to report bad mount options here;
+ * but traditionally debugfs has ignored all mount options
+ */
+ }
+ }
+
+ return 0;
+}
+
+static int debugfs_apply_options(struct super_block *sb)
+{
+ struct debugfs_fs_info *fsi = sb->s_fs_info;
+ struct inode *inode = sb->s_root->d_inode;
+ struct debugfs_mount_opts *opts = &fsi->mount_opts;
+
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= opts->mode;
+
+ inode->i_uid = opts->uid;
+ inode->i_gid = opts->gid;
+
+ return 0;
+}
+
+static int debugfs_remount(struct super_block *sb, int *flags, char *data)
+{
+ int err;
+ struct debugfs_fs_info *fsi = sb->s_fs_info;
+
+ err = debugfs_parse_options(data, &fsi->mount_opts);
+ if (err)
+ goto fail;
+
+ debugfs_apply_options(sb);
+
+fail:
+ return err;
+}
+
+static int debugfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
+ struct debugfs_mount_opts *opts = &fsi->mount_opts;
+
+ if (opts->uid != 0)
+ seq_printf(m, ",uid=%u", opts->uid);
+ if (opts->gid != 0)
+ seq_printf(m, ",gid=%u", opts->gid);
+ if (opts->mode != DEBUGFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", opts->mode);
+
+ return 0;
+}
+
+static const struct super_operations debugfs_super_operations = {
+ .statfs = simple_statfs,
+ .remount_fs = debugfs_remount,
+ .show_options = debugfs_show_options,
+};
+
static int debug_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr debug_files[] = {{""}};
+ struct debugfs_fs_info *fsi;
+ int err;
+
+ save_mount_options(sb, data);
+
+ fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
+ sb->s_fs_info = fsi;
+ if (!fsi) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ err = debugfs_parse_options(data, &fsi->mount_opts);
+ if (err)
+ goto fail;
+
+ err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
+ if (err)
+ goto fail;
+
+ sb->s_op = &debugfs_super_operations;
+
+ debugfs_apply_options(sb);
+
+ return 0;
- return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
+fail:
+ kfree(fsi);
+ sb->s_fs_info = NULL;
+ return err;
}
static struct dentry *debug_mount(struct file_system_type *fs_type,
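With these options wired up, a debugfs instance can be mounted with a non-default owner and root mode; note that the gid=%u token is parsed with match_octal() in the hunk above, which looks like an oversight. A user-space sketch of the mount call (paths and IDs are example values):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Same as: mount -t debugfs -o uid=1000,gid=1000,mode=0700
		 *          none /sys/kernel/debug */
		if (mount("none", "/sys/kernel/debug", "debugfs", 0,
			  "uid=1000,gid=1000,mode=0700") < 0)
			perror("mount");
		return 0;
	}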
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 57dae0b..10f5e0b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -36,7 +36,61 @@
#define DEVPTS_DEFAULT_PTMX_MODE 0000
#define PTMX_MINOR 2
-extern int pty_limit; /* Config limit on Unix98 ptys */
+/*
+ * sysctl support for setting limits on the number of Unix98 ptys allocated.
+ * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly.
+ */
+static int pty_limit = NR_UNIX98_PTY_DEFAULT;
+static int pty_reserve = NR_UNIX98_PTY_RESERVE;
+static int pty_limit_min;
+static int pty_limit_max = INT_MAX;
+static int pty_count;
+
+static struct ctl_table pty_table[] = {
+ {
+ .procname = "max",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .data = &pty_limit,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &pty_limit_min,
+ .extra2 = &pty_limit_max,
+ }, {
+ .procname = "reserve",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .data = &pty_reserve,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &pty_limit_min,
+ .extra2 = &pty_limit_max,
+ }, {
+ .procname = "nr",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .data = &pty_count,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table pty_kern_table[] = {
+ {
+ .procname = "pty",
+ .mode = 0555,
+ .child = pty_table,
+ },
+ {}
+};
+
+static struct ctl_table pty_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = pty_kern_table,
+ },
+ {}
+};
+
static DEFINE_MUTEX(allocated_ptys_lock);
static struct vfsmount *devpts_mnt;
@@ -49,10 +103,11 @@ struct pts_mount_opts {
umode_t mode;
umode_t ptmxmode;
int newinstance;
+ int max;
};
enum {
- Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance,
+ Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, Opt_max,
Opt_err
};
@@ -63,6 +118,7 @@ static const match_table_t tokens = {
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
{Opt_ptmxmode, "ptmxmode=%o"},
{Opt_newinstance, "newinstance"},
+ {Opt_max, "max=%d"},
#endif
{Opt_err, NULL}
};
@@ -109,6 +165,7 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
opts->gid = 0;
opts->mode = DEVPTS_DEFAULT_MODE;
opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ opts->max = NR_UNIX98_PTY_MAX;
/* newinstance makes sense only on initial mount */
if (op == PARSE_MOUNT)
@@ -152,6 +209,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
if (op == PARSE_MOUNT)
opts->newinstance = 1;
break;
+ case Opt_max:
+ if (match_int(&args[0], &option) ||
+ option < 0 || option > NR_UNIX98_PTY_MAX)
+ return -EINVAL;
+ opts->max = option;
+ break;
#endif
default:
printk(KERN_ERR "devpts: called with bogus options\n");
@@ -258,6 +321,8 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",mode=%03o", opts->mode);
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
+ if (opts->max < NR_UNIX98_PTY_MAX)
+ seq_printf(seq, ",max=%d", opts->max);
#endif
return 0;
@@ -437,6 +502,12 @@ retry:
return -ENOMEM;
mutex_lock(&allocated_ptys_lock);
+ if (pty_count >= pty_limit -
+ (fsi->mount_opts.newinstance ? pty_reserve : 0)) {
+ mutex_unlock(&allocated_ptys_lock);
+ return -ENOSPC;
+ }
+
ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
if (ida_ret < 0) {
mutex_unlock(&allocated_ptys_lock);
@@ -445,11 +516,12 @@ retry:
return -EIO;
}
- if (index >= pty_limit) {
+ if (index >= fsi->mount_opts.max) {
ida_remove(&fsi->allocated_ptys, index);
mutex_unlock(&allocated_ptys_lock);
- return -EIO;
+ return -ENOSPC;
}
+ pty_count++;
mutex_unlock(&allocated_ptys_lock);
return index;
}
@@ -461,6 +533,7 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
mutex_lock(&allocated_ptys_lock);
ida_remove(&fsi->allocated_ptys, idx);
+ pty_count--;
mutex_unlock(&allocated_ptys_lock);
}
@@ -557,11 +630,15 @@ void devpts_pty_kill(struct tty_struct *tty)
static int __init init_devpts_fs(void)
{
int err = register_filesystem(&devpts_fs_type);
+ struct ctl_table_header *table;
+
if (!err) {
+ table = register_sysctl_table(pty_root_table);
devpts_mnt = kern_mount(&devpts_fs_type);
if (IS_ERR(devpts_mnt)) {
err = PTR_ERR(devpts_mnt);
unregister_filesystem(&devpts_fs_type);
+ unregister_sysctl_table(table);
}
}
return err;
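The new limits surface as /proc/sys/kernel/pty/max and /proc/sys/kernel/pty/reserve (writable) and /proc/sys/kernel/pty/nr (a read-only count of allocated ptys), alongside the per-instance max= mount option added above. A small user-space sketch that reads them back:

	#include <stdio.h>

	static long read_sysctl(const char *path)
	{
		long val = -1;
		FILE *f = fopen(path, "r");

		if (f) {
			if (fscanf(f, "%ld", &val) != 1)
				val = -1;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		printf("max=%ld reserve=%ld nr=%ld\n",
		       read_sysctl("/proc/sys/kernel/pty/max"),
		       read_sysctl("/proc/sys/kernel/pty/reserve"),
		       read_sysctl("/proc/sys/kernel/pty/nr"));
		return 0;
	}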
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 0b3109e..ca0c59a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -52,6 +52,7 @@
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
+#include <net/sctp/sctp.h>
#include <net/sctp/user.h>
#include <net/ipv6.h>
@@ -474,9 +475,6 @@ static void process_sctp_notification(struct connection *con,
int prim_len, ret;
int addr_len;
struct connection *new_con;
- sctp_peeloff_arg_t parg;
- int parglen = sizeof(parg);
- int err;
/*
* We get this before any data for an association.
@@ -525,23 +523,19 @@ static void process_sctp_notification(struct connection *con,
return;
/* Peel off a new sock */
- parg.associd = sn->sn_assoc_change.sac_assoc_id;
- ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
- SCTP_SOCKOPT_PEELOFF,
- (void *)&parg, &parglen);
+ sctp_lock_sock(con->sock->sk);
+ ret = sctp_do_peeloff(con->sock->sk,
+ sn->sn_assoc_change.sac_assoc_id,
+ &new_con->sock);
+ sctp_release_sock(con->sock->sk);
if (ret < 0) {
log_print("Can't peel off a socket for "
"connection %d to node %d: err=%d",
- parg.associd, nodeid, ret);
- return;
- }
- new_con->sock = sockfd_lookup(parg.sd, &err);
- if (!new_con->sock) {
- log_print("sockfd_lookup error %d", err);
+ (int)sn->sn_assoc_change.sac_assoc_id,
+ nodeid, ret);
return;
}
add_sock(new_con->sock, new_con);
- sockfd_put(new_con->sock);
log_print("connecting to %d sctp association %d",
nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
diff --git a/fs/exec.c b/fs/exec.c
index 60478a0..0b931471 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -63,6 +63,8 @@
#include <trace/events/task.h>
#include "internal.h"
+#include <trace/events/sched.h>
+
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
@@ -846,6 +848,7 @@ static int exec_mmap(struct mm_struct *mm)
if (old_mm) {
up_read(&old_mm->mmap_sem);
BUG_ON(active_mm != old_mm);
+ setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
mmput(old_mm);
return 0;
@@ -973,8 +976,8 @@ static int de_thread(struct task_struct *tsk)
sig->notify_count = 0;
no_thread_group:
- if (current->mm)
- setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
+ /* we have changed execution domain */
+ tsk->exit_signal = SIGCHLD;
exit_itimers(sig);
flush_itimer_signals();
@@ -1337,13 +1340,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
ret = -EFAULT;
goto out;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
put_arg_page(page);
if (offset == PAGE_SIZE)
@@ -1400,9 +1403,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
*/
bprm->recursion_depth = depth;
if (retval >= 0) {
- if (depth == 0)
- ptrace_event(PTRACE_EVENT_EXEC,
- old_pid);
+ if (depth == 0) {
+ trace_sched_process_exec(current, old_pid, bprm);
+ ptrace_event(PTRACE_EVENT_EXEC, old_pid);
+ }
put_binfmt(fmt);
allow_write_access(bprm->file);
if (bprm->file)
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 8040583..c61e62a 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -597,7 +597,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
goto fail;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
de = (struct exofs_dir_entry *)kaddr;
de->name_len = 1;
de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
@@ -611,7 +611,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
de->inode_no = cpu_to_le64(parent->i_ino);
memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
exofs_set_de_type(de, inode);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
err = exofs_commit_chunk(page, 0, chunk_size);
fail:
page_cache_release(page);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d37df35..0f4f5c9 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -645,7 +645,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
unlock_page(page);
goto fail;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr, 0, chunk_size);
de = (struct ext2_dir_entry_2 *)kaddr;
de->name_len = 1;
@@ -660,7 +660,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
de->inode = cpu_to_le32(parent->i_ino);
memcpy (de->name, "..\0", 4);
ext2_set_de_type (de, inode);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
err = ext2_commit_chunk(page, 0, chunk_size);
fail:
page_cache_release(page);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5b4a936..77b535a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1284,7 +1284,7 @@ int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
/**
- * writeback_inodes_sb_if_idle - start writeback if none underway
+ * writeback_inodes_sb_nr_if_idle - start writeback if none underway
* @sb: the superblock
* @nr: the number of pages to write
* @reason: reason why some writeback work was initiated
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5f3368a..7df2b5e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -838,10 +838,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
}
}
if (page) {
- void *mapaddr = kmap_atomic(page, KM_USER0);
+ void *mapaddr = kmap_atomic(page);
void *buf = mapaddr + offset;
offset += fuse_copy_do(cs, &buf, &count);
- kunmap_atomic(mapaddr, KM_USER0);
+ kunmap_atomic(mapaddr);
} else
offset += fuse_copy_do(cs, NULL, &count);
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4a199fd..a841868 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1887,11 +1887,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
- vaddr = kmap_atomic(pages[0], KM_USER0);
+ vaddr = kmap_atomic(pages[0]);
err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
if (err)
goto out;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 501e5cb..38b7a74 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -434,12 +434,12 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
if (error)
return error;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
flush_dcache_page(page);
brelse(dibh);
SetPageUptodate(page);
@@ -542,9 +542,9 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
- p = kmap_atomic(page, KM_USER0);
+ p = kmap_atomic(page);
memcpy(buf + copied, p + offset, amt);
- kunmap_atomic(p, KM_USER0);
+ kunmap_atomic(p);
mark_page_accessed(page);
page_cache_release(page);
copied += amt;
@@ -788,11 +788,11 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(buf + pos, kaddr + pos, copied);
memset(kaddr + pos + copied, 0, len - copied);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (!PageUptodate(page))
SetPageUptodate(page);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 0301be6..df7c6e8 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -553,11 +553,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
__be32 *ptr;
clear_buffer_escaped(bh);
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
ptr = kaddr + bh_offset(bh);
if (*ptr == cpu_to_be32(GFS2_MAGIC))
set_buffer_escaped(bh);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -594,10 +594,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
if (buffer_escaped(bd->bd_bh)) {
void *kaddr;
bh1 = gfs2_log_get_buf(sdp);
- kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bd->bd_bh->b_page);
memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
bh1->b_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
*(__be32 *)bh1->b_data = 0;
clear_buffer_escaped(bd->bd_bh);
unlock_buffer(bd->bd_bh);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a45b21b..c0f8904 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -720,12 +720,12 @@ get_a_page:
gfs2_trans_add_bh(ip->i_gl, bh, 0);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
nbytes = PAGE_CACHE_SIZE - offset;
memcpy(kaddr + offset, ptr, nbytes);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
unlock_page(page);
page_cache_release(page);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 59c09f9..0971e92 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -129,6 +129,8 @@ static int kjournald(void *arg)
setup_timer(&journal->j_commit_timer, commit_timeout,
(unsigned long)current);
+ set_freezable();
+
/* Record that the journal thread is running */
journal->j_task = current;
wake_up(&journal->j_wait_done_commit);
@@ -328,7 +330,7 @@ repeat:
new_offset = offset_in_page(jh2bh(jh_in)->b_data);
}
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
/*
* Check for escaping
*/
@@ -337,7 +339,7 @@ repeat:
need_copy_out = 1;
do_escape = 1;
}
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
/*
* Do we need to do a data copy?
@@ -354,9 +356,9 @@ repeat:
}
jh_in->b_frozen_data = tmp;
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
new_page = virt_to_page(tmp);
new_offset = offset_in_page(tmp);
@@ -368,9 +370,9 @@ repeat:
* copying, we can finally do so.
*/
if (do_escape) {
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
*((unsigned int *)(mapped_data + new_offset)) = 0;
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
}
set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 7fce94b..b2a7e52 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -718,9 +718,9 @@ done:
"Possible IO failure.\n");
page = jh2bh(jh)->b_page;
offset = offset_in_page(jh2bh(jh)->b_data);
- source = kmap_atomic(page, KM_USER0);
+ source = kmap_atomic(page);
memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
- kunmap_atomic(source, KM_USER0);
+ kunmap_atomic(source);
}
jbd_unlock_bh_state(bh);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5069b84..c067a8c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -286,10 +286,10 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
char *addr;
__u32 checksum;
- addr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
checksum = crc32_be(crc32_sum,
(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
return checksum;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c0a5f9f..839377e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -139,6 +139,8 @@ static int kjournald2(void *arg)
setup_timer(&journal->j_commit_timer, commit_timeout,
(unsigned long)current);
+ set_freezable();
+
/* Record that the journal thread is running */
journal->j_task = current;
wake_up(&journal->j_wait_done_commit);
@@ -345,7 +347,7 @@ repeat:
new_offset = offset_in_page(jh2bh(jh_in)->b_data);
}
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
/*
* Fire data frozen trigger if data already wasn't frozen. Do this
* before checking for escaping, as the trigger may modify the magic
@@ -364,7 +366,7 @@ repeat:
need_copy_out = 1;
do_escape = 1;
}
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
/*
* Do we need to do a data copy?
@@ -385,9 +387,9 @@ repeat:
}
jh_in->b_frozen_data = tmp;
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
new_page = virt_to_page(tmp);
new_offset = offset_in_page(tmp);
@@ -406,9 +408,9 @@ repeat:
* copying, we can finally do so.
*/
if (do_escape) {
- mapped_data = kmap_atomic(new_page, KM_USER0);
+ mapped_data = kmap_atomic(new_page);
*((unsigned int *)(mapped_data + new_offset)) = 0;
- kunmap_atomic(mapped_data, KM_USER0);
+ kunmap_atomic(mapped_data);
}
set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 35ae096..e5aba56 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -783,12 +783,12 @@ done:
"Possible IO failure.\n");
page = jh2bh(jh)->b_page;
offset = offset_in_page(jh2bh(jh)->b_data);
- source = kmap_atomic(page, KM_USER0);
+ source = kmap_atomic(page);
/* Fire data frozen trigger just before we copy the data */
jbd2_buffer_frozen_trigger(jh, source + offset,
jh->b_triggers);
memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
- kunmap_atomic(source, KM_USER0);
+ kunmap_atomic(source);
/*
* Now that the frozen data is saved off, we need to store
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 5b6c9d1..96ed3c9 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -340,7 +340,7 @@ int jffs2_unregister_compressor(struct jffs2_compressor *comp)
if (comp->usecount) {
spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n");
+ printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n");
return -1;
}
list_del(&comp->list);
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 4aea231..bea5d1b 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -177,17 +177,17 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
(filler_t *)logfs_readpage, NULL);
if (IS_ERR(page))
return page;
- dd = kmap_atomic(page, KM_USER0);
+ dd = kmap_atomic(page);
BUG_ON(dd->namelen == 0);
if (name->len != be16_to_cpu(dd->namelen) ||
memcmp(name->name, dd->name, name->len)) {
- kunmap_atomic(dd, KM_USER0);
+ kunmap_atomic(dd);
page_cache_release(page);
continue;
}
- kunmap_atomic(dd, KM_USER0);
+ kunmap_atomic(dd);
return page;
}
return NULL;
@@ -365,9 +365,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
return NULL;
}
index = page->index;
- dd = kmap_atomic(page, KM_USER0);
+ dd = kmap_atomic(page);
ino = be64_to_cpu(dd->ino);
- kunmap_atomic(dd, KM_USER0);
+ kunmap_atomic(dd);
page_cache_release(page);
inode = logfs_iget(dir->i_sb, ino);
@@ -402,12 +402,12 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
if (!page)
return -ENOMEM;
- dd = kmap_atomic(page, KM_USER0);
+ dd = kmap_atomic(page);
memset(dd, 0, sizeof(*dd));
dd->ino = cpu_to_be64(inode->i_ino);
dd->type = logfs_type(inode);
logfs_set_name(dd, &dentry->d_name);
- kunmap_atomic(dd, KM_USER0);
+ kunmap_atomic(dd);
err = logfs_write_buf(dir, page, WF_LOCK);
unlock_page(page);
@@ -576,9 +576,9 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
if (IS_ERR(page))
return PTR_ERR(page);
*pos = page->index;
- map = kmap_atomic(page, KM_USER0);
+ map = kmap_atomic(page);
memcpy(dd, map, sizeof(*dd));
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page_cache_release(page);
return 0;
}
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 4153e65..e3ab5e5 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -517,9 +517,9 @@ static int indirect_write_alias(struct super_block *sb,
ino = page->mapping->host->i_ino;
logfs_unpack_index(page->index, &bix, &level);
- child = kmap_atomic(page, KM_USER0);
+ child = kmap_atomic(page);
val = child[pos];
- kunmap_atomic(child, KM_USER0);
+ kunmap_atomic(child);
err = write_one_alias(sb, ino, bix, level, pos, val);
if (err)
return err;
@@ -673,9 +673,9 @@ static void alloc_indirect_block(struct inode *inode, struct page *page,
alloc_data_block(inode, page);
block = logfs_block(page);
- array = kmap_atomic(page, KM_USER0);
+ array = kmap_atomic(page);
initialize_block_counters(page, block, array, page_is_empty);
- kunmap_atomic(array, KM_USER0);
+ kunmap_atomic(array);
}
static void block_set_pointer(struct page *page, int index, u64 ptr)
@@ -685,10 +685,10 @@ static void block_set_pointer(struct page *page, int index, u64 ptr)
u64 oldptr;
BUG_ON(!block);
- array = kmap_atomic(page, KM_USER0);
+ array = kmap_atomic(page);
oldptr = be64_to_cpu(array[index]);
array[index] = cpu_to_be64(ptr);
- kunmap_atomic(array, KM_USER0);
+ kunmap_atomic(array);
SetPageUptodate(page);
block->full += !!(ptr & LOGFS_FULLY_POPULATED)
@@ -701,9 +701,9 @@ static u64 block_get_pointer(struct page *page, int index)
__be64 *block;
u64 ptr;
- block = kmap_atomic(page, KM_USER0);
+ block = kmap_atomic(page);
ptr = be64_to_cpu(block[index]);
- kunmap_atomic(block, KM_USER0);
+ kunmap_atomic(block);
return ptr;
}
@@ -850,7 +850,7 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
}
slot = get_bits(bix, SUBLEVEL(level));
- rblock = kmap_atomic(page, KM_USER0);
+ rblock = kmap_atomic(page);
while (slot < LOGFS_BLOCK_FACTOR) {
if (data && (rblock[slot] != 0))
break;
@@ -861,12 +861,12 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
bix &= ~(increment - 1);
}
if (slot >= LOGFS_BLOCK_FACTOR) {
- kunmap_atomic(rblock, KM_USER0);
+ kunmap_atomic(rblock);
logfs_put_read_page(page);
return bix;
}
bofs = be64_to_cpu(rblock[slot]);
- kunmap_atomic(rblock, KM_USER0);
+ kunmap_atomic(rblock);
logfs_put_read_page(page);
if (!bofs) {
BUG_ON(data);
@@ -1961,9 +1961,9 @@ int logfs_read_inode(struct inode *inode)
if (IS_ERR(page))
return PTR_ERR(page);
- di = kmap_atomic(page, KM_USER0);
+ di = kmap_atomic(page);
logfs_disk_to_inode(di, inode);
- kunmap_atomic(di, KM_USER0);
+ kunmap_atomic(di);
move_page_to_inode(inode, page);
page_cache_release(page);
return 0;
@@ -1982,9 +1982,9 @@ static struct page *inode_to_page(struct inode *inode)
if (!page)
return NULL;
- di = kmap_atomic(page, KM_USER0);
+ di = kmap_atomic(page);
logfs_inode_to_disk(inode, di);
- kunmap_atomic(di, KM_USER0);
+ kunmap_atomic(di);
move_inode_to_page(page, inode);
return page;
}
@@ -2041,13 +2041,13 @@ static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,
if (write)
alloc_indirect_block(inode, page, 0);
- se = kmap_atomic(page, KM_USER0);
+ se = kmap_atomic(page);
change_se(se + child_no, arg);
if (write) {
logfs_set_alias(sb, logfs_block(page), child_no);
BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
}
- kunmap_atomic(se, KM_USER0);
+ kunmap_atomic(se);
logfs_put_write_page(page);
}
@@ -2245,10 +2245,10 @@ int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
if (!page)
return -ENOMEM;
- pagebuf = kmap_atomic(page, KM_USER0);
+ pagebuf = kmap_atomic(page);
memcpy(pagebuf, buf, count);
flush_dcache_page(page);
- kunmap_atomic(pagebuf, KM_USER0);
+ kunmap_atomic(pagebuf);
if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
i_size_write(inode, pos + LOGFS_BLOCKSIZE);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index ab798ed..e28d090 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -543,9 +543,9 @@ void move_page_to_btree(struct page *page)
BUG_ON(!item); /* mempool empty */
memset(item, 0, sizeof(*item));
- child = kmap_atomic(page, KM_USER0);
+ child = kmap_atomic(page);
item->val = child[pos];
- kunmap_atomic(child, KM_USER0);
+ kunmap_atomic(child);
item->child_no = pos;
list_add(&item->list, &block->item_list);
}
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 085a926..685b2d9 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -335,7 +335,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
goto fail;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr, 0, PAGE_CACHE_SIZE);
if (sbi->s_version == MINIX_V3) {
@@ -355,7 +355,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
de->inode = dir->i_ino;
strcpy(de->name, "..");
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
diff --git a/fs/namei.c b/fs/namei.c
index 0ccc74ee..13e6a1f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1374,6 +1374,126 @@ static inline int can_lookup(struct inode *inode)
return 1;
}
+/*
+ * We can do the critical dentry name comparison and hashing
+ * operations one word at a time, but we are limited to:
+ *
+ * - Architectures with fast unaligned word accesses. We could
+ * do a "get_unaligned()" if this helps and is sufficiently
+ * fast.
+ *
+ * - Little-endian machines (so that we can generate the mask
+ * of low bytes efficiently). Again, we *could* do a byte
+ * swapping load on big-endian architectures if that is not
+ * expensive enough to make the optimization worthless.
+ *
+ * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
+ * do not trap on the (extremely unlikely) case of a page
+ * crossing operation.
+ *
+ * - Furthermore, we need an efficient 64-bit compile for the
+ * 64-bit case in order to generate the "number of bytes in
+ * the final mask". Again, that could be replaced with a
+ * efficient population count instruction or similar.
+ */
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608 >> 56;
+}
+
+static inline unsigned int fold_hash(unsigned long hash)
+{
+ hash += hash >> (8*sizeof(int));
+ return hash;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#define fold_hash(x) (x)
+
+#endif
+
+unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long a, mask;
+ unsigned long hash = 0;
+
+ for (;;) {
+ a = *(unsigned long *)name;
+ hash *= 9;
+ if (len < sizeof(unsigned long))
+ break;
+ hash += a;
+ name += sizeof(unsigned long);
+ len -= sizeof(unsigned long);
+ if (!len)
+ goto done;
+ }
+ mask = ~(~0ul << len*8);
+ hash += mask & a;
+done:
+ return fold_hash(hash);
+}
+EXPORT_SYMBOL(full_name_hash);
+
+#define ONEBYTES 0x0101010101010101ul
+#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
+#define HIGHBITS 0x8080808080808080ul
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+ return ((a - ONEBYTES) & ~a) & HIGHBITS;
+}
+
+/*
+ * Calculate the length and hash of the path component, and
+ * return the length of the component;
+ */
+static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+{
+ unsigned long a, mask, hash, len;
+
+ hash = a = 0;
+ len = -sizeof(unsigned long);
+ do {
+ hash = (hash + a) * 9;
+ len += sizeof(unsigned long);
+ a = *(unsigned long *)(name+len);
+ /* Do we have any NUL or '/' bytes in this word? */
+ mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
+ } while (!mask);
+
+ /* The mask *below* the first high bit set */
+ mask = (mask - 1) & ~mask;
+ mask >>= 7;
+ hash += a & mask;
+ *hashp = fold_hash(hash);
+
+ return len + count_masked_bytes(mask);
+}
+
+#else
+
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
@@ -1402,6 +1522,8 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
return len;
}
+#endif
+
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
@@ -3384,9 +3506,9 @@ retry:
if (err)
goto fail;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len-1);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
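hash_name() above scans a word at a time until it sees a word containing a NUL or '/' byte: has_zero() sets 0x80 in each byte position that is zero, the (mask - 1) & ~mask step keeps only the bits below the first such flag, and count_masked_bytes() converts the resulting bytemask into a length. A standalone little-endian, 64-bit sketch of that sequence (constants copied from the hunks above):

	#include <stdio.h>
	#include <string.h>

	#define ONEBYTES   0x0101010101010101ul
	#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
	#define HIGHBITS   0x8080808080808080ul

	/* High bit set in each byte of 'a' that is zero. */
	static unsigned long has_zero(unsigned long a)
	{
		return ((a - ONEBYTES) & ~a) & HIGHBITS;
	}

	static long count_masked_bytes(unsigned long mask)
	{
		return mask * 0x0001020304050608ul >> 56;
	}

	int main(void)
	{
		const char name[] = "etc/fstab";
		unsigned long a, mask;

		memcpy(&a, name, sizeof(a));	/* LE load of "etc/fsta" */

		/* Flag any NUL and '/' bytes in this word. */
		mask = has_zero(a) | has_zero(a ^ SLASHBYTES);

		/* Keep the bits below the first flag... */
		mask = (mask - 1) & ~mask;
		/* ...shifted down to 0xff per preceding byte. */
		mask >>= 7;

		/* The first component, "etc", is 3 bytes: prints 3. */
		printf("%ld\n", count_masked_bytes(mask));
		return 0;
	}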
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 31778f7..d4f772e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include <net/ipv6.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/bc_xprt.h>
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fd9a872..32aa691 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -260,10 +260,10 @@ void nfs_readdir_clear_array(struct page *page)
struct nfs_cache_array *array;
int i;
- array = kmap_atomic(page, KM_USER0);
+ array = kmap_atomic(page);
for (i = 0; i < array->size; i++)
kfree(array->array[i].string.name);
- kunmap_atomic(array, KM_USER0);
+ kunmap_atomic(array);
}
/*
@@ -1870,11 +1870,11 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
if (!page)
return -ENOMEM;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr, symname, pathlen);
if (pathlen < PAGE_SIZE)
memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
if (error != 0) {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 2c05f19..a1bbf77 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -198,6 +198,7 @@ int nfs_idmap_init(void)
if (ret < 0)
goto failed_put_key;
+ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
cred->thread_keyring = keyring;
cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
id_resolver_cache = cred;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ec9f6ef..caf92d0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -193,7 +193,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
* when talking to the server, we always send cookie 0
* instead of 1 or 2.
*/
- start = p = kmap_atomic(*readdir->pages, KM_USER0);
+ start = p = kmap_atomic(*readdir->pages);
if (cookie == 0) {
*p++ = xdr_one; /* next */
@@ -221,7 +221,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
- kunmap_atomic(start, KM_USER0);
+ kunmap_atomic(start);
}
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c9b342c..dab5c4c 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
kaddr, 1);
mark_buffer_dirty(cp_bh);
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, header_bh,
kaddr);
le64_add_cpu(&header->ch_ncheckpoints, 1);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile);
}
@@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
continue;
}
- kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(cp_bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(
cpfile, cno, cp_bh, kaddr);
nicps = 0;
@@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
cpfile, cp_bh, kaddr, nicps);
if (count == 0) {
/* make hole */
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(cp_bh);
ret =
nilfs_cpfile_delete_checkpoint_block(
@@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
}
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(cp_bh);
}
if (tnicps > 0) {
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, header_bh,
kaddr);
le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
brelse(header_bh);
@@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
continue; /* skip hole */
}
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
if (!nilfs_checkpoint_invalid(cp)) {
@@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
n++;
}
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
}
@@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
if (ret < 0)
goto out;
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
if (curr == 0) {
ret = 0;
@@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
ret = 0; /* No snapshots (started from a hole block) */
goto out;
}
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
while (n < nci) {
cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
curr = ~(__u64)0; /* Terminator */
@@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
if (curr_blkoff != next_blkoff) {
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
0, &bh);
@@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
WARN_ON(ret == -ENOENT);
goto out;
}
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
}
curr = next;
curr_blkoff = next_blkoff;
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
*cnop = curr;
ret = n;
@@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(cp_bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
if (nilfs_checkpoint_invalid(cp)) {
ret = -ENOENT;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
goto out_cp;
}
if (nilfs_checkpoint_snapshot(cp)) {
ret = 0;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
goto out_cp;
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
if (ret < 0)
goto out_cp;
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
list = &header->ch_snapshot_list;
curr_bh = header_bh;
@@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
curr = prev;
if (curr_blkoff != prev_blkoff) {
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(curr_bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
0, &curr_bh);
if (ret < 0)
goto out_header;
- kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(curr_bh->b_page);
}
curr_blkoff = prev_blkoff;
cp = nilfs_cpfile_block_get_checkpoint(
@@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
list = &cp->cp_snapshot_list;
prev = le64_to_cpu(list->ssl_prev);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (prev != 0) {
ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
@@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
get_bh(prev_bh);
}
- kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(curr_bh->b_page);
list = nilfs_cpfile_block_get_snapshot_list(
cpfile, curr, curr_bh, kaddr);
list->ssl_prev = cpu_to_le64(cno);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(cp_bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
nilfs_checkpoint_set_snapshot(cp);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(prev_bh->b_page);
list = nilfs_cpfile_block_get_snapshot_list(
cpfile, prev, prev_bh, kaddr);
list->ssl_next = cpu_to_le64(cno);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
le64_add_cpu(&header->ch_nsnapshots, 1);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(prev_bh);
mark_buffer_dirty(curr_bh);
@@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(cp_bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
if (nilfs_checkpoint_invalid(cp)) {
ret = -ENOENT;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
goto out_cp;
}
if (!nilfs_checkpoint_snapshot(cp)) {
ret = 0;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
goto out_cp;
}
list = &cp->cp_snapshot_list;
next = le64_to_cpu(list->ssl_next);
prev = le64_to_cpu(list->ssl_prev);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
if (ret < 0)
@@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
get_bh(prev_bh);
}
- kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(next_bh->b_page);
list = nilfs_cpfile_block_get_snapshot_list(
cpfile, next, next_bh, kaddr);
list->ssl_prev = cpu_to_le64(prev);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(prev_bh->b_page);
list = nilfs_cpfile_block_get_snapshot_list(
cpfile, prev, prev_bh, kaddr);
list->ssl_next = cpu_to_le64(next);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(cp_bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
nilfs_checkpoint_clear_snapshot(cp);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
le64_add_cpu(&header->ch_nsnapshots, -1);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(next_bh);
mark_buffer_dirty(prev_bh);
@@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
if (ret < 0)
goto out;
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
if (nilfs_checkpoint_invalid(cp))
ret = -ENOENT;
else
ret = nilfs_checkpoint_snapshot(cp);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
out:
@@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
cpstat->cs_cno = nilfs_mdt_cno(cpfile);
cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh);
out_sem:
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fcc2f86..b5c13f3 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
struct nilfs_dat_entry *entry;
void *kaddr;
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
entry->de_blocknr = cpu_to_le64(0);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_palloc_commit_alloc_entry(dat, req);
nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
struct nilfs_dat_entry *entry;
void *kaddr;
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
entry->de_blocknr = cpu_to_le64(0);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
struct nilfs_dat_entry *entry;
void *kaddr;
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
entry->de_blocknr = cpu_to_le64(blocknr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
}
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
return ret;
}
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (blocknr == 0) {
ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
sector_t blocknr;
void *kaddr;
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
}
entry->de_end = cpu_to_le64(end);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (blocknr == 0)
nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
sector_t blocknr;
void *kaddr;
- kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (start == nilfs_mdt_cno(dat) && blocknr == 0)
nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
}
}
- kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
(unsigned long long)vblocknr,
(unsigned long long)le64_to_cpu(entry->de_start),
(unsigned long long)le64_to_cpu(entry->de_end));
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(entry_bh);
return -EINVAL;
}
WARN_ON(blocknr == 0);
entry->de_blocknr = cpu_to_le64(blocknr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(entry_bh);
nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
}
}
- kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
blocknr = le64_to_cpu(entry->de_blocknr);
if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
*blocknrp = blocknr;
out:
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(entry_bh);
return ret;
}
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
0, &entry_bh);
if (ret < 0)
return ret;
- kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(entry_bh->b_page);
/* last virtual block number in this block */
first = vinfo->vi_vblocknr;
do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
vinfo->vi_end = le64_to_cpu(entry->de_end);
vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(entry_bh);
}
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index ca35b3a..df1a7fb 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
unlock_page(page);
goto fail;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr, 0, chunk_size);
de = (struct nilfs_dir_entry *)kaddr;
de->name_len = 1;
@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
page_cache_release(page);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 684d763..5a48df7 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
return ret;
}
- kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(req.pr_entry_bh->b_page);
raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
req.pr_entry_bh, kaddr);
raw_inode->i_flags = 0;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(req.pr_entry_bh);
brelse(req.pr_entry_bh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 800e8d7..f9897d0 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
set_buffer_mapped(bh);
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
if (init_block)
init_block(inode, bh, kaddr);
flush_dcache_page(bh->b_page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 65221a0..3e7b2a0 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
struct page *spage = sbh->b_page, *dpage = dbh->b_page;
struct buffer_head *bh;
- kaddr0 = kmap_atomic(spage, KM_USER0);
- kaddr1 = kmap_atomic(dpage, KM_USER1);
+ kaddr0 = kmap_atomic(spage);
+ kaddr1 = kmap_atomic(dpage);
memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
- kunmap_atomic(kaddr1, KM_USER1);
- kunmap_atomic(kaddr0, KM_USER0);
+ kunmap_atomic(kaddr1);
+ kunmap_atomic(kaddr0);
dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
dbh->b_blocknr = sbh->b_blocknr;
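[The nilfs_copy_buffer() hunk above is the interesting conversion case: two pages mapped at once. With the KM_USER0/KM_USER1 slots gone, the one rule the stacked API adds is that nested mappings must be released in LIFO order, exactly as the converted code does. A short sketch; copy_page_range() is a hypothetical name.]

/* Hypothetical page-to-page copy mirroring nilfs_copy_buffer() above. */
static void copy_page_range(struct page *dst, struct page *src,
			    size_t offset, size_t size)
{
	void *kaddr0 = kmap_atomic(src);	/* mapped first  */
	void *kaddr1 = kmap_atomic(dst);	/* mapped second */

	memcpy(kaddr1 + offset, kaddr0 + offset, size);

	kunmap_atomic(kaddr1);	/* unmap in LIFO order: */
	kunmap_atomic(kaddr0);	/* second first, first last */
}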
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a604ac0..f1626f5 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
if (unlikely(!bh_org))
return -EIO;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(bh_org);
return 0;
}
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 850a7c0..dc9a913 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
crc = crc32_le(crc, bh->b_data, bh->b_size);
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
raw_sum->ss_datasum = cpu_to_le32(crc);
}
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 0a0aba6..c5b7653 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
struct nilfs_sufile_header *header;
void *kaddr;
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(header_bh);
}
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
ncleansegs = le64_to_cpu(header->sh_ncleansegs);
last_alloc = le64_to_cpu(header->sh_last_alloc);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nsegments = nilfs_sufile_get_nsegments(sufile);
maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
&su_bh);
if (ret < 0)
goto out_header;
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
continue;
/* found a clean segment */
nilfs_segment_usage_set_dirty(su);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, -1);
le64_add_cpu(&header->sh_ndirtysegs, 1);
header->sh_last_alloc = cpu_to_le64(segnum);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
sui->ncleansegs--;
mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
goto out_header;
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(su_bh);
}
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
struct nilfs_segment_usage *su;
void *kaddr;
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (unlikely(!nilfs_segment_usage_clean(su))) {
printk(KERN_WARNING "%s: segment %llu must be clean\n",
__func__, (unsigned long long)segnum);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return;
}
nilfs_segment_usage_set_dirty(su);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
void *kaddr;
int clean, dirty;
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
su->su_nblocks == cpu_to_le32(0)) {
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return;
}
clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0);
su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
void *kaddr;
int sudirty;
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_clean(su)) {
printk(KERN_WARNING "%s: segment %llu is already clean\n",
__func__, (unsigned long long)segnum);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return;
}
WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
sudirty = nilfs_segment_usage_dirty(su);
nilfs_segment_usage_set_clean(su);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
WARN_ON(nilfs_segment_usage_error(su));
if (modtime)
su->su_lastmod = cpu_to_le64(modtime);
su->su_nblocks = cpu_to_le32(nblocks);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
if (ret < 0)
goto out_sem;
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
spin_lock(&nilfs->ns_last_segment_lock);
sustat->ss_prot_seq = nilfs->ns_prot_seq;
spin_unlock(&nilfs->ns_last_segment_lock);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(header_bh);
out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
void *kaddr;
int suclean;
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_error(su)) {
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return;
}
suclean = nilfs_segment_usage_clean(su);
nilfs_segment_usage_set_error(su);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (suclean) {
nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
/* hole */
continue;
}
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(su_bh);
goto out_header;
}
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
nc++;
}
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (nc > 0) {
mark_buffer_dirty(su_bh);
ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
sui->ncleansegs -= nsegs - newnsegs;
}
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
continue;
}
- kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
si->sui_flags |=
(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(su_bh);
}
ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
goto failed;
sui = NILFS_SUI(sufile);
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh);
sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
brelse(header_bh);
sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 0b1e885b..fa9c05f 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
if (file_ofs < init_size)
ofs = init_size - file_ofs;
local_irq_save(flags);
- kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ kaddr = kmap_atomic(page);
memset(kaddr + bh_offset(bh) + ofs, 0,
bh->b_size - ofs);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+ kunmap_atomic(kaddr);
local_irq_restore(flags);
}
} else {
@@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
/* Should have been verified before we got here... */
BUG_ON(!recs);
local_irq_save(flags);
- kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
- kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+ kunmap_atomic(kaddr);
local_irq_restore(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@ retry_readpage:
/* Race with shrinking truncate. */
attr_len = i_size;
}
- addr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
/* Copy the data to the page. */
memcpy(addr, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@ retry_readpage:
/* Zero the remainder of the page. */
memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
flush_dcache_page(page);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
put_unm_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
@@ -746,14 +746,14 @@ lock_retry_remap:
unsigned long *bpos, *bend;
/* Check if the buffer is zero. */
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
bpos = (unsigned long *)(kaddr + bh_offset(bh));
bend = (unsigned long *)((u8*)bpos + blocksize);
do {
if (unlikely(*bpos))
break;
} while (likely(++bpos < bend));
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (bpos == bend) {
/*
* Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@ retry_writepage:
/* Shrinking cannot fail. */
BUG_ON(err);
}
- addr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
addr, attr_len);
/* Zero out of bounds area in the page cache page. */
memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
/* We are done with the page. */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index e028199..a27e3fe 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
attr_size = le32_to_cpu(a->data.resident.value_length);
BUG_ON(attr_size != data_size);
if (page && !PageUptodate(page)) {
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr, (u8*)a +
le16_to_cpu(a->data.resident.value_offset),
attr_size);
memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
}
@@ -1806,9 +1806,9 @@ undo_err_out:
sizeof(a->data.resident.reserved));
/* Copy the data from the page back to the attribute value. */
if (page) {
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy((u8*)a + mp_ofs, kaddr, attr_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
/* Setup the allocated size in the ntfs inode in case it changed. */
write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
size = PAGE_CACHE_SIZE;
if (idx == end)
size = end_ofs;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr + start_ofs, val, size - start_ofs);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
"page (index 0x%lx).", idx);
return -ENOMEM;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr, val, PAGE_CACHE_SIZE);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
/*
* If the page has buffers, mark them uptodate since buffer
* state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
"(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr, val, end_ofs);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c587e2d..8639169 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -704,7 +704,7 @@ map_buffer_cached:
u8 *kaddr;
unsigned pofs;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (bh_pos < pos) {
pofs = bh_pos & ~PAGE_CACHE_MASK;
memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@ map_buffer_cached:
pofs = end & ~PAGE_CACHE_MASK;
memset(kaddr + pofs, 0, bh_end - end);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
flush_dcache_page(page);
}
continue;
@@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- addr = kmap_atomic(*pages, KM_USER0);
+ addr = kmap_atomic(*pages);
left = __copy_from_user_inatomic(addr + ofs, buf, len);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
if (unlikely(left)) {
/* Do it the slow way. */
addr = kmap(*pages);
@@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- addr = kmap_atomic(*pages, KM_USER0);
+ addr = kmap_atomic(*pages);
copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
if (unlikely(copied != len)) {
/* Do it the slow way. */
addr = kmap(*pages);
@@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
BUG_ON(end > le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset));
kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
/* Copy the received data from the page to the mft record. */
memcpy(kattr + pos, kaddr + pos, bytes);
/* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
flush_dcache_page(page);
SetPageUptodate(page);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
/* Update initialized_size/i_size if necessary. */
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->initialized_size;
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index faece71..809c0e6 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -2008,14 +2008,14 @@ typedef struct {
*
* When a directory is small enough to fit inside the index root then this
* is the only attribute describing the directory. When the directory is too
- * large to fit in the index root, on the other hand, two aditional attributes
+ * large to fit in the index root, on the other hand, two additional attributes
* are present: an index allocation attribute, containing sub-nodes of the B+
* directory tree (see below), and a bitmap attribute, describing which virtual
* cluster numbers (vcns) in the index allocation attribute are in use by an
* index block.
*
* NOTE: The root directory (FILE_root) contains an entry for itself. Other
- * dircetories do not contain entries for themselves, though.
+ * directories do not contain entries for themselves, though.
*/
typedef struct {
ATTR_TYPE type; /* Type of the indexed attribute. Is
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 3502a9e..b341492 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
/*
* Subtract the number of set bits. If this
* is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
*/
nr_free -= bitmap_weight(kaddr,
PAGE_CACHE_SIZE * BITS_PER_BYTE);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
page_cache_release(page);
}
ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
/*
* Subtract the number of set bits. If this
* is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
*/
nr_free -= bitmap_weight(kaddr,
PAGE_CACHE_SIZE * BITS_PER_BYTE);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
page_cache_release(page);
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 78b68af..6577432 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -102,7 +102,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
* copy, the data is still good. */
if (buffer_jbd(buffer_cache_bh)
&& ocfs2_inode_is_new(inode)) {
- kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
+ kaddr = kmap_atomic(bh_result->b_page);
if (!kaddr) {
mlog(ML_ERROR, "couldn't kmap!\n");
goto bail;
@@ -110,7 +110,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
memcpy(kaddr + (bh_result->b_size * iblock),
buffer_cache_bh->b_data,
bh_result->b_size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
set_buffer_uptodate(bh_result);
}
brelse(buffer_cache_bh);
@@ -236,13 +236,13 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
return -EROFS;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (size)
memcpy(kaddr, di->id2.i_data.id_data, size);
/* Clear the remaining part of the page */
memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
SetPageUptodate(page);
@@ -689,7 +689,7 @@ static void ocfs2_clear_page_regions(struct page *page,
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (from || to) {
if (from > cluster_start)
@@ -700,7 +700,7 @@ static void ocfs2_clear_page_regions(struct page *page,
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
/*
@@ -1981,9 +1981,9 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
}
}
- kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
+ kaddr = kmap_atomic(wc->w_target_page);
memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
trace_ocfs2_write_end_inline(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
diff --git a/fs/pipe.c b/fs/pipe.c
index a932ced..fe0502f 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
{
if (atomic) {
buf->flags |= PIPE_BUF_FLAG_ATOMIC;
- return kmap_atomic(buf->page, KM_USER0);
+ return kmap_atomic(buf->page);
}
return kmap(buf->page);
@@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
{
if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
- kunmap_atomic(map_data, KM_USER0);
+ kunmap_atomic(map_data);
} else
kunmap(buf->page);
}
@@ -565,14 +565,14 @@ redo1:
iov_fault_in_pages_read(iov, chars);
redo2:
if (atomic)
- src = kmap_atomic(page, KM_USER0);
+ src = kmap_atomic(page);
else
src = kmap(page);
error = pipe_iov_copy_from_user(src, iov, chars,
atomic);
if (atomic)
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
else
kunmap(page);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d4548dd..965d4bd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1310,8 +1310,7 @@ sched_autogroup_write(struct file *file, const char __user *buf,
if (!p)
return -ESRCH;
- err = nice;
- err = proc_sched_autogroup_set_nice(p, &err);
+ err = proc_sched_autogroup_set_nice(p, nice);
if (err)
count = err;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d245cb2..e5e69af 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -513,7 +513,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
n = copy_to_user(buffer, (char *)start, tsz);
/*
- * We cannot distingush between fault on source
+ * We cannot distinguish between fault on source
* and fault on destination. When this happens
* we clear too and hope it will trigger the
* EFAULT again.
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index a6b6217..67bbf6e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -6,7 +6,9 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
+#include <linux/sched.h>
#include <linux/namei.h>
+#include <linux/mm.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 4674197..8b4f12b 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -71,6 +71,7 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
+#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index c4b73f9..79e5a8b 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -975,7 +975,7 @@ static int leaf_cut_entries(struct buffer_head *bh,
remove */
RFALSE(!is_direntry_le_ih(ih), "10180: item is not directory item");
RFALSE(I_ENTRY_COUNT(ih) < from + del_count,
- "10185: item contains not enough entries: entry_cout = %d, from = %d, to delete = %d",
+ "10185: item contains not enough entries: entry_count = %d, from = %d, to delete = %d",
I_ENTRY_COUNT(ih), from, del_count);
if (del_count == 0)
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index f1b68af..f8afa4b 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1284,12 +1284,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
** -clm
*/
- data = kmap_atomic(un_bh->b_page, KM_USER0);
+ data = kmap_atomic(un_bh->b_page);
off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
memcpy(data + off,
B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
ret_value);
- kunmap_atomic(data, KM_USER0);
+ kunmap_atomic(data);
}
/* Perform balancing after all resources have been collected at once. */
do_balance(&s_del_balance, NULL, NULL, M_DELETE);
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index 32f9a80..5e2624d 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -128,9 +128,9 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
if (up_to_date_bh) {
unsigned pgoff =
(tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
- char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+ char *kaddr = kmap_atomic(up_to_date_bh->b_page);
memset(kaddr + pgoff, 0, blk_size - total_tail);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
diff --git a/fs/splice.c b/fs/splice.c
index 1ec0493..f16402e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
goto out;
if (buf->page != page) {
- /*
- * Careful, ->map() uses KM_USER0!
- */
char *src = buf->ops->map(pipe, buf, 1);
- char *dst = kmap_atomic(page, KM_USER1);
+ char *dst = kmap_atomic(page);
memcpy(dst + offset, src + buf->offset, this_len);
flush_dcache_page(page);
- kunmap_atomic(dst, KM_USER1);
+ kunmap_atomic(dst);
buf->ops->unmap(pipe, buf, src);
}
ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 38bb1c6..8ca62c2 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -464,10 +464,10 @@ static int squashfs_readpage(struct file *file, struct page *page)
if (PageUptodate(push_page))
goto skip_page;
- pageaddr = kmap_atomic(push_page, KM_USER0);
+ pageaddr = kmap_atomic(push_page);
squashfs_copy_data(pageaddr, buffer, offset, avail);
memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
- kunmap_atomic(pageaddr, KM_USER0);
+ kunmap_atomic(pageaddr);
flush_dcache_page(push_page);
SetPageUptodate(push_page);
skip_page:
@@ -484,9 +484,9 @@ skip_page:
error_out:
SetPageError(page);
out:
- pageaddr = kmap_atomic(page, KM_USER0);
+ pageaddr = kmap_atomic(page);
memset(pageaddr, 0, PAGE_CACHE_SIZE);
- kunmap_atomic(pageaddr, KM_USER0);
+ kunmap_atomic(pageaddr);
flush_dcache_page(page);
if (!PageError(page))
SetPageUptodate(page);
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 1191817..12806df 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -90,14 +90,14 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
goto error_out;
}
- pageaddr = kmap_atomic(page, KM_USER0);
+ pageaddr = kmap_atomic(page);
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
else
block = entry->next_index;
- kunmap_atomic(pageaddr, KM_USER0);
+ kunmap_atomic(pageaddr);
squashfs_cache_put(entry);
}
diff --git a/fs/super.c b/fs/super.c
index 6277ec6..d90e900 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -32,6 +32,7 @@
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
+#include <linux/fsnotify.h>
#include "internal.h"
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 7fdf6a7..2a7a3f5 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -22,76 +22,103 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <linux/hash.h>
#include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex);
DEFINE_SPINLOCK(sysfs_assoc_lock);
+#define to_sysfs_dirent(X) rb_entry((X), struct sysfs_dirent, s_rb)
+
static DEFINE_SPINLOCK(sysfs_ino_lock);
static DEFINE_IDA(sysfs_ino_ida);
/**
- * sysfs_link_sibling - link sysfs_dirent into sibling list
+ * sysfs_name_hash - hash a namespace tag and name pair
+ * @ns: Namespace tag to hash
+ * @name: Null terminated string to hash
+ *
+ * Returns a 31-bit hash of ns + name (so it fits in an off_t)
+ */
+static unsigned int sysfs_name_hash(const void *ns, const char *name)
+{
+ unsigned long hash = init_name_hash();
+ unsigned int len = strlen(name);
+ while (len--)
+ hash = partial_name_hash(*name++, hash);
+ hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
+ hash &= 0x7fffffffU;
+ /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
+ if (hash < 1)
+ hash += 2;
+ if (hash >= INT_MAX)
+ hash = INT_MAX - 1;
+ return hash;
+}
+
+static int sysfs_name_compare(unsigned int hash, const void *ns,
+ const char *name, const struct sysfs_dirent *sd)
+{
+ if (hash != sd->s_hash)
+ return hash - sd->s_hash;
+ if (ns != sd->s_ns)
+ return ns - sd->s_ns;
+ return strcmp(name, sd->s_name);
+}
+
+static int sysfs_sd_compare(const struct sysfs_dirent *left,
+ const struct sysfs_dirent *right)
+{
+ return sysfs_name_compare(left->s_hash, left->s_ns, left->s_name,
+ right);
+}
+
+/**
+ * sysfs_link_sibling - link sysfs_dirent into sibling rbtree
* @sd: sysfs_dirent of interest
*
- * Link @sd into its sibling list which starts from
+ * Link @sd into its sibling rbtree which starts from
* sd->s_parent->s_dir.children.
*
* Locking:
* mutex_lock(sysfs_mutex)
+ *
+ * RETURNS:
+ * 0 on success, -EEXIST on failure.
*/
-static void sysfs_link_sibling(struct sysfs_dirent *sd)
+static int sysfs_link_sibling(struct sysfs_dirent *sd)
{
- struct sysfs_dirent *parent_sd = sd->s_parent;
-
- struct rb_node **p;
- struct rb_node *parent;
+ struct rb_node **node = &sd->s_parent->s_dir.children.rb_node;
+ struct rb_node *parent = NULL;
if (sysfs_type(sd) == SYSFS_DIR)
- parent_sd->s_dir.subdirs++;
-
- p = &parent_sd->s_dir.inode_tree.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;
-#define node rb_entry(parent, struct sysfs_dirent, inode_node)
- if (sd->s_ino < node->s_ino) {
- p = &node->inode_node.rb_left;
- } else if (sd->s_ino > node->s_ino) {
- p = &node->inode_node.rb_right;
- } else {
- printk(KERN_CRIT "sysfs: inserting duplicate inode '%lx'\n",
- (unsigned long) sd->s_ino);
- BUG();
- }
-#undef node
- }
- rb_link_node(&sd->inode_node, parent, p);
- rb_insert_color(&sd->inode_node, &parent_sd->s_dir.inode_tree);
-
- p = &parent_sd->s_dir.name_tree.rb_node;
- parent = NULL;
- while (*p) {
- int c;
- parent = *p;
-#define node rb_entry(parent, struct sysfs_dirent, name_node)
- c = strcmp(sd->s_name, node->s_name);
- if (c < 0) {
- p = &node->name_node.rb_left;
- } else {
- p = &node->name_node.rb_right;
- }
-#undef node
+ sd->s_parent->s_dir.subdirs++;
+
+ while (*node) {
+ struct sysfs_dirent *pos;
+ int result;
+
+ pos = to_sysfs_dirent(*node);
+ parent = *node;
+ result = sysfs_sd_compare(sd, pos);
+ if (result < 0)
+ node = &pos->s_rb.rb_left;
+ else if (result > 0)
+ node = &pos->s_rb.rb_right;
+ else
+ return -EEXIST;
}
- rb_link_node(&sd->name_node, parent, p);
- rb_insert_color(&sd->name_node, &parent_sd->s_dir.name_tree);
+ /* add new node and rebalance the tree */
+ rb_link_node(&sd->s_rb, parent, node);
+ rb_insert_color(&sd->s_rb, &sd->s_parent->s_dir.children);
+ return 0;
}
/**
- * sysfs_unlink_sibling - unlink sysfs_dirent from sibling list
+ * sysfs_unlink_sibling - unlink sysfs_dirent from sibling rbtree
* @sd: sysfs_dirent of interest
*
- * Unlink @sd from its sibling list which starts from
+ * Unlink @sd from its sibling rbtree which starts from
* sd->s_parent->s_dir.children.
*
* Locking:
@@ -102,8 +129,7 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
if (sysfs_type(sd) == SYSFS_DIR)
sd->s_parent->s_dir.subdirs--;
- rb_erase(&sd->inode_node, &sd->s_parent->s_dir.inode_tree);
- rb_erase(&sd->name_node, &sd->s_parent->s_dir.name_tree);
+ rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children);
}
/**
@@ -198,7 +224,7 @@ static void sysfs_deactivate(struct sysfs_dirent *sd)
rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
-static int sysfs_alloc_ino(ino_t *pino)
+static int sysfs_alloc_ino(unsigned int *pino)
{
int ino, rc;
@@ -217,7 +243,7 @@ static int sysfs_alloc_ino(ino_t *pino)
return rc;
}
-static void sysfs_free_ino(ino_t ino)
+static void sysfs_free_ino(unsigned int ino)
{
spin_lock(&sysfs_ino_lock);
ida_remove(&sysfs_ino_ida, ino);
@@ -402,6 +428,7 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
struct sysfs_inode_attrs *ps_iattr;
+ int ret;
if (!!sysfs_ns_type(acxt->parent_sd) != !!sd->s_ns) {
WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
@@ -410,12 +437,12 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
return -EINVAL;
}
- if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name))
- return -EEXIST;
-
+ sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
sd->s_parent = sysfs_get(acxt->parent_sd);
- sysfs_link_sibling(sd);
+ ret = sysfs_link_sibling(sd);
+ if (ret)
+ return ret;
/* Update timestamps on the parent */
ps_iattr = acxt->parent_sd->s_iattr;
@@ -565,8 +592,8 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
const void *ns,
const unsigned char *name)
{
- struct rb_node *p = parent_sd->s_dir.name_tree.rb_node;
- struct sysfs_dirent *found = NULL;
+ struct rb_node *node = parent_sd->s_dir.children.rb_node;
+ unsigned int hash;
if (!!sysfs_ns_type(parent_sd) != !!ns) {
WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
@@ -575,33 +602,21 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
return NULL;
}
- while (p) {
- int c;
-#define node rb_entry(p, struct sysfs_dirent, name_node)
- c = strcmp(name, node->s_name);
- if (c < 0) {
- p = node->name_node.rb_left;
- } else if (c > 0) {
- p = node->name_node.rb_right;
- } else {
- found = node;
- p = node->name_node.rb_left;
- }
-#undef node
- }
-
- if (found) {
- while (found->s_ns != ns) {
- p = rb_next(&found->name_node);
- if (!p)
- return NULL;
- found = rb_entry(p, struct sysfs_dirent, name_node);
- if (strcmp(name, found->s_name))
- return NULL;
- }
+ hash = sysfs_name_hash(ns, name);
+ while (node) {
+ struct sysfs_dirent *sd;
+ int result;
+
+ sd = to_sysfs_dirent(node);
+ result = sysfs_name_compare(hash, ns, name, sd);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return sd;
}
-
- return found;
+ return NULL;
}
/**
@@ -804,9 +819,9 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
pr_debug("sysfs %s: removing dir\n", dir_sd->s_name);
sysfs_addrm_start(&acxt, dir_sd);
- pos = rb_first(&dir_sd->s_dir.inode_tree);
+ pos = rb_first(&dir_sd->s_dir.children);
while (pos) {
- struct sysfs_dirent *sd = rb_entry(pos, struct sysfs_dirent, inode_node);
+ struct sysfs_dirent *sd = to_sysfs_dirent(pos);
pos = rb_next(pos);
if (sysfs_type(sd) != SYSFS_DIR)
sysfs_remove_one(&acxt, sd);
@@ -863,6 +878,7 @@ int sysfs_rename(struct sysfs_dirent *sd,
dup_name = sd->s_name;
sd->s_name = new_name;
+ sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
}
/* Move to the appropriate place in the appropriate directory's rbtree. */
@@ -919,38 +935,36 @@ static int sysfs_dir_release(struct inode *inode, struct file *filp)
}
static struct sysfs_dirent *sysfs_dir_pos(const void *ns,
- struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
+ struct sysfs_dirent *parent_sd, loff_t hash, struct sysfs_dirent *pos)
{
if (pos) {
int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) &&
pos->s_parent == parent_sd &&
- ino == pos->s_ino;
+ hash == pos->s_hash;
sysfs_put(pos);
if (!valid)
pos = NULL;
}
- if (!pos && (ino > 1) && (ino < INT_MAX)) {
- struct rb_node *p = parent_sd->s_dir.inode_tree.rb_node;
- while (p) {
-#define node rb_entry(p, struct sysfs_dirent, inode_node)
- if (ino < node->s_ino) {
- pos = node;
- p = node->inode_node.rb_left;
- } else if (ino > node->s_ino) {
- p = node->inode_node.rb_right;
- } else {
- pos = node;
+ if (!pos && (hash > 1) && (hash < INT_MAX)) {
+ struct rb_node *node = parent_sd->s_dir.children.rb_node;
+ while (node) {
+ pos = to_sysfs_dirent(node);
+
+ if (hash < pos->s_hash)
+ node = node->rb_left;
+ else if (hash > pos->s_hash)
+ node = node->rb_right;
+ else
break;
- }
-#undef node
}
}
+ /* Skip over entries in the wrong namespace */
while (pos && pos->s_ns != ns) {
- struct rb_node *p = rb_next(&pos->inode_node);
- if (!p)
+ struct rb_node *node = rb_next(&pos->s_rb);
+ if (!node)
pos = NULL;
else
- pos = rb_entry(p, struct sysfs_dirent, inode_node);
+ pos = to_sysfs_dirent(node);
}
return pos;
}
@@ -960,11 +974,11 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
{
pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
if (pos) do {
- struct rb_node *p = rb_next(&pos->inode_node);
- if (!p)
+ struct rb_node *node = rb_next(&pos->s_rb);
+ if (!node)
pos = NULL;
else
- pos = rb_entry(p, struct sysfs_dirent, inode_node);
+ pos = to_sysfs_dirent(node);
} while (pos && pos->s_ns != ns);
return pos;
}
@@ -1006,7 +1020,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
len = strlen(name);
ino = pos->s_ino;
type = dt_type(pos);
- filp->f_pos = ino;
+ filp->f_pos = pos->s_hash;
filp->private_data = sysfs_get(pos);
mutex_unlock(&sysfs_mutex);
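[A note on the sysfs conversion above: directory entries now live in a single rbtree ordered by a 31-bit hash of (ns, name), and readdir stores that hash in f_pos, so a telldir/seekdir position survives siblings being added or removed. The user-space sketch below mirrors the clamping that keeps the reserved offsets free; str_hash() and the pointer mix are stand-ins for the kernel's partial_name_hash() and hash_ptr(), and the sketch clamps both 0 and 1 to match the comment's stated reservation (the hunk as shown only moves hash 0).]

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* Stand-in string hash; the kernel uses partial_name_hash(). */
static uint32_t str_hash(const char *s)
{
	uint32_t h = 0;
	while (*s)
		h = h * 31 + (unsigned char)*s++;
	return h;
}

/* Mirrors the clamping in sysfs_name_hash(): keep the result in
 * (1, INT_MAX) so 0, 1 and INT_MAX stay free for the magic "."/".."
 * and end-of-directory offsets. */
static uint32_t name_hash(uintptr_t ns, const char *name)
{
	uint32_t hash;

	hash = (str_hash(name) ^ (uint32_t)(ns * 0x9e3779b9u)) & 0x7fffffffu;
	if (hash < 2)
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

int main(void)
{
	printf("%u\n", name_hash(0, "uevent"));
	return 0;
}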
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 85eb816..feb2d69 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -136,12 +136,13 @@ static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *sec
void *old_secdata;
size_t old_secdata_len;
- iattrs = sd->s_iattr;
- if (!iattrs)
- iattrs = sysfs_init_inode_attrs(sd);
- if (!iattrs)
- return -ENOMEM;
+ if (!sd->s_iattr) {
+ sd->s_iattr = sysfs_init_inode_attrs(sd);
+ if (!sd->s_iattr)
+ return -ENOMEM;
+ }
+ iattrs = sd->s_iattr;
old_secdata = iattrs->ia_secdata;
old_secdata_len = iattrs->ia_secdata_len;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 2243f8e..52c3bdb6 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -36,7 +36,7 @@ struct sysfs_dirent sysfs_root = {
.s_name = "",
.s_count = ATOMIC_INIT(1),
.s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
- .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+ .s_mode = S_IFDIR | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 7484a36..661a963 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -20,9 +20,8 @@ struct sysfs_elem_dir {
struct kobject *kobj;
unsigned long subdirs;
-
- struct rb_root inode_tree;
- struct rb_root name_tree;
+ /* children rbtree starts here and goes through sd->s_rb */
+ struct rb_root children;
};
struct sysfs_elem_symlink {
@@ -62,8 +61,7 @@ struct sysfs_dirent {
struct sysfs_dirent *s_parent;
const char *s_name;
- struct rb_node inode_node;
- struct rb_node name_node;
+ struct rb_node s_rb;
union {
struct completion *completion;
@@ -71,6 +69,7 @@ struct sysfs_dirent {
} u;
const void *s_ns; /* namespace tag */
+ unsigned int s_hash; /* ns + name hash */
union {
struct sysfs_elem_dir s_dir;
struct sysfs_elem_symlink s_symlink;
@@ -78,9 +77,9 @@ struct sysfs_dirent {
struct sysfs_elem_bin_attr s_bin_attr;
};
- unsigned int s_flags;
+ unsigned short s_flags;
umode_t s_mode;
- ino_t s_ino;
+ unsigned int s_ino;
struct sysfs_inode_attrs *s_iattr;
};
@@ -95,11 +94,11 @@ struct sysfs_dirent {
#define SYSFS_ACTIVE_REF (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR)
/* identify any namespace tag on sysfs_dirents */
-#define SYSFS_NS_TYPE_MASK 0xff00
+#define SYSFS_NS_TYPE_MASK 0xf00
#define SYSFS_NS_TYPE_SHIFT 8
#define SYSFS_FLAG_MASK ~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK)
-#define SYSFS_FLAG_REMOVED 0x020000
+#define SYSFS_FLAG_REMOVED 0x02000
static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
{
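[Because s_flags shrinks to an unsigned short here, the type bits, the namespace-type bits and the removed flag must all pack into 16 bits, which is why SYSFS_NS_TYPE_MASK narrows to 0xf00 and SYSFS_FLAG_REMOVED drops to 0x02000. The excerpt cuts off inside sysfs_type(); for orientation, the accessors are plain mask-and-shift along these lines (a sketch, not necessarily verbatim from the tree).]

static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
{
	return sd->s_flags & SYSFS_TYPE_MASK;
}

static inline enum kobj_ns_type sysfs_ns_type(struct sysfs_dirent *sd)
{
	return (sd->s_flags & SYSFS_NS_TYPE_MASK) >> SYSFS_NS_TYPE_SHIFT;
}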
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index f9c234b..5c8f6dc 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1042,10 +1042,10 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (i_size > synced_i_size) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d567b84..7f3f7ba 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -87,10 +87,10 @@ static int udf_adinicb_write_end(struct file *file,
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
kaddr + offset, copied);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
}