author     Christoph Hellwig <hch@tuxera.com>   2011-02-16 09:34:26 +0100
committer  Christoph Hellwig <hch@lst.de>       2011-06-30 13:40:59 +0200
commit     c6d5f5fa658f2569a7baaff5acda261a1316cee9 (patch)
tree       1e7e03558a9229e8fc3aaec450ffa8049a08c96b /fs/hfsplus
parent     4ba2d5fdcfd19de0dedf394ddc48db2f219fa89a (diff)
hfsplus: lift the 2TB size limit
Replace the hardcoded 2TB limit with a dynamic limit based on the block size now that we have fixed the few overflows preventing operation with large volumes.

Signed-off-by: Christoph Hellwig <hch@tuxera.com>
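The new check delegates the size math to generic_check_addressable(): instead of a fixed cutoff, the volume passes if total_blocks shifted by alloc_blksz_shift is still representable by the kernel's sector and page-cache index types. The standalone sketch below only illustrates that arithmetic; the helper name check_addressable(), the explicit bit-width parameters, and the 512-byte sector / 4 KiB page assumptions are illustrative, not taken from the patch (the real kernel helper compares against a down-shifted limit rather than shifting the byte count up).

/*
 * Illustrative userspace sketch of the addressability check; NOT the
 * kernel's generic_check_addressable().  sector_bits and pgoff_bits
 * stand in for the widths of sector_t and pgoff_t, which are 32 on
 * 32-bit kernels without large block device support.
 */
#include <stdint.h>
#include <stdio.h>

static int check_addressable(unsigned int blocksize_bits, uint64_t num_blocks,
			     unsigned int sector_bits, unsigned int pgoff_bits)
{
	uint64_t bytes = num_blocks << blocksize_bits;

	/* Must be expressible as a count of 512-byte sectors ... */
	if ((bytes >> 9) > (UINT64_MAX >> (64 - sector_bits)))
		return -1;
	/* ... and as a count of 4 KiB page-cache pages. */
	if ((bytes >> 12) > (UINT64_MAX >> (64 - pgoff_bits)))
		return -1;
	return 0;
}

int main(void)
{
	/* A 3 TB volume with 4 KiB allocation blocks. */
	uint64_t blocks = (3ULL << 40) >> 12;

	printf("32-bit sector_t/pgoff_t: %d\n",
	       check_addressable(12, blocks, 32, 32));	/* rejected */
	printf("64-bit sector_t/pgoff_t: %d\n",
	       check_addressable(12, blocks, 64, 64));	/* accepted */
	return 0;
}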
Diffstat (limited to 'fs/hfsplus')
-rw-r--r--  fs/hfsplus/super.c   | 9 +++++++++
-rw-r--r--  fs/hfsplus/wrapper.c | 4 ----
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 84a47b7..acaef57 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbi->rsrc_clump_blocks)
 		sbi->rsrc_clump_blocks = 1;
 
+	err = generic_check_addressable(sbi->alloc_blksz_shift,
+					sbi->total_blocks);
+	if (err) {
+		printk(KERN_ERR "hfs: filesystem size too large.\n");
+		goto out_free_vhdr;
+	}
+
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_flags |= MS_RDONLY;
 	}
 
+	err = -EINVAL;
+
 	/* Load metadata objects (B*Trees) */
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4b86468..2f933e8 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
 		goto out;
-	if ((u64)part_start + part_size > 0x100000000ULL) {
-		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		goto out;
-	}
 
 	error = -ENOMEM;
 	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
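The check removed from wrapper.c is where the old limit came from: part_start and part_size count 512-byte sectors, so capping their sum at 0x100000000 caps the volume at 2^32 * 512 bytes = 2 TB. A throwaway check of that arithmetic (standalone, not part of the patch):

/* 2^32 sectors of 512 bytes each is 2^41 bytes, i.e. the old 2TB ceiling. */
#include <stdio.h>

int main(void)
{
	unsigned long long limit_bytes = 0x100000000ULL * 512;

	printf("%llu bytes (%llu TiB)\n", limit_bytes, limit_bytes >> 40);
	return 0;
}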