hfsplus: lift the 2TB size limit

Replace the hardcoded 2TB limit with a dynamic limit based on the block
size now that we have fixed the few overflows preventing operation with
large volumes.

The old check in the wrapper code refused anything past 2^32 512-byte
sectors (i.e. 2TB) unconditionally; generic_check_addressable() instead
derives the limit from what sector_t and pgoff_t can address for the
given allocation block size.  Because a successful check leaves err at
zero, err is reset to -EINVAL before the metadata btrees are loaded, as
their failure paths return whatever err currently holds.
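
For reference, here is a simplified, userspace-buildable sketch of the
check that generic_check_addressable() performs (the real helper lives
in fs/libfs.c); the typedefs, the PAGE_SHIFT value, and the
check_addressable name are illustrative stand-ins for the kernel's
configuration-dependent definitions:

	#include <stdint.h>
	#include <errno.h>

	typedef uint64_t sector_t;	/* 512-byte sector index */
	typedef unsigned long pgoff_t;	/* page cache index */
	#define PAGE_SHIFT 12		/* assumes 4k pages */

	/* Sketch of the logic in generic_check_addressable(). */
	int check_addressable(unsigned blocksize_bits, uint64_t num_blocks)
	{
		uint64_t last_block;

		if (num_blocks == 0)
			return 0;
		last_block = num_blocks - 1;

		/* block size must be between 512 bytes and the page size */
		if (blocksize_bits < 9 || blocksize_bits > PAGE_SHIFT)
			return -EINVAL;
		/* last block, in 512-byte sectors, must fit in sector_t */
		if (last_block > ((sector_t)~0ULL >> (blocksize_bits - 9)))
			return -EFBIG;
		/* last page index must fit in pgoff_t */
		if ((last_block >> (PAGE_SHIFT - blocksize_bits)) > (pgoff_t)~0ULL)
			return -EFBIG;
		return 0;
	}

With a 64-bit sector_t both limits sit far beyond 2TB; on a 32-bit
kernel built without large block device support, where sector_t is
32 bits wide, the same logic still works out to a 2TB cap, which is
the point of deriving the limit instead of hardcoding it.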

Signed-off-by: Christoph Hellwig <hch@tuxera.com>
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 84a47b7..acaef57 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbi->rsrc_clump_blocks)
 		sbi->rsrc_clump_blocks = 1;
 
+	err = generic_check_addressable(sbi->alloc_blksz_shift,
+					sbi->total_blocks);
+	if (err) {
+		pr_err("hfs: filesystem size too large.\n");
+		goto out_free_vhdr;
+	}
+
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_flags |= MS_RDONLY;
 	}
 
+	err = -EINVAL;
+
 	/* Load metadata objects (B*Trees) */
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4b86468..2f933e8 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
 
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
 		goto out;
-	if ((u64)part_start + part_size > 0x100000000ULL) {
-		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		goto out;
-	}
 
 	error = -ENOMEM;
 	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);