[Pkg-lustre-svn-commit] updated: [b6aa547] Added first version of vanilla kernel 2.6.32 patchset

Marco Nelles marco.nelles at credativ.com
Thu Jun 28 12:01:55 UTC 2012


The following commit has been merged in the master branch:
commit b6aa547223759f6e6412d9cd93bb3bbdccd52259
Author: Marco Nelles <marco.nelles at credativ.com>
Date:   Thu Jun 28 14:00:59 2012 +0200

    Added first version of vanilla kernel 2.6.32 patchset

diff --git a/debian/patches/ldiskfs-kernel-patchset.patch b/debian/patches/ldiskfs-kernel-patchset.patch
new file mode 100644
index 0000000..a455804
--- /dev/null
+++ b/debian/patches/ldiskfs-kernel-patchset.patch
@@ -0,0 +1,8581 @@
+diff --git a/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..79a0e7f
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch
+@@ -0,0 +1,81 @@
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:14.205662995 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:19.413668742 +0200
++@@ -292,6 +292,8 @@
++ 	jbd2_journal_abort_handle(handle);
++ }
++ 
+++EXPORT_SYMBOL(ext4_journal_abort_handle);
+++
++ /* Deal with the reporting of failure conditions on a filesystem such as
++  * inconsistencies detected or read IO failures.
++  *
++@@ -3001,6 +3003,8 @@
++ 	return ret;
++ }
++ 
+++EXPORT_SYMBOL(ext4_force_commit);
+++
++ /*
++  * Setup any per-fs journal parameters now.  We'll do this both on
++  * initial mount, once the journal has been initialised but before we've
++@@ -4056,6 +4060,12 @@
++ 			unsigned long *blocks, int *created, int create);
++ EXPORT_SYMBOL(ext4_map_inode_page);
++ 
+++EXPORT_SYMBOL(ext4_xattr_get);
+++EXPORT_SYMBOL(ext4_xattr_set_handle);
+++EXPORT_SYMBOL(ext4_bread);
+++EXPORT_SYMBOL(ext4_journal_start_sb);
+++EXPORT_SYMBOL(__ext4_journal_stop);
+++
++ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
++ MODULE_DESCRIPTION("Fourth Extended Filesystem");
++ MODULE_LICENSE("GPL");
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:08:38.193663076 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:19.417666703 +0200
++@@ -1498,6 +1498,8 @@
++ 				       struct buffer_head *bh,
++ 				       ext4_group_t group,
++ 				       struct ext4_group_desc *desc);
+++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
+++						  ext4_group_t block_group);
++ extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
++ 
++ /* mballoc.c */
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:08:38.081662455 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:19.417666703 +0200
++@@ -98,7 +98,7 @@
++  *
++  * Return buffer_head of bitmap on success or NULL.
++  */
++-static struct buffer_head *
+++struct buffer_head *
++ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
++ {
++ 	struct ext4_group_desc *desc;
++@@ -161,6 +161,7 @@
++ 	}
++ 	return bh;
++ }
+++EXPORT_SYMBOL(ext4_read_inode_bitmap);
++ 
++ /*
++  * NOTE! When we get the inode, we're the only people
++Index: linux-source-2.6.32/fs/ext4/balloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/balloc.c	2012-06-28 12:08:37.973664862 +0200
+++++ linux-source-2.6.32/fs/ext4/balloc.c	2012-06-28 12:09:19.417666703 +0200
++@@ -232,6 +232,7 @@
++ 		*bh = sbi->s_group_desc[group_desc];
++ 	return desc;
++ }
+++EXPORT_SYMBOL(ext4_get_group_desc);
++ 
++ static int ext4_valid_block_bitmap(struct super_block *sb,
++ 					struct ext4_group_desc *desc,
+diff --git a/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..9f72327
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch
+@@ -0,0 +1,87 @@
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:10:06.381677398 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:30.005662279 +0200
++@@ -1086,6 +1086,36 @@
++ 	return ERR_PTR(err);
++ }
++ 
+++unsigned long ext4_find_reverse(struct super_block *sb)
+++{
+++	struct ext4_group_desc *desc;
+++	struct buffer_head *bitmap_bh = NULL;
+++	int group;
+++	unsigned long ino, offset;
+++
+++	for (offset = (EXT4_INODES_PER_GROUP(sb) >> 1); offset >= 0;
+++	     offset >>= 1) {
+++		for (group = EXT4_SB(sb)->s_groups_count - 1; group >= 0;
+++		     --group) {
+++			desc = ext4_get_group_desc(sb, group, NULL);
+++			if (ext4_free_inodes_count(sb, desc) == 0)
+++				continue;
+++
+++			bitmap_bh = ext4_read_inode_bitmap(sb, group);
+++			if (!bitmap_bh)
+++				continue;
+++
+++			ino = ext4_find_next_zero_bit((unsigned long *)
+++					bitmap_bh->b_data,
+++					EXT4_INODES_PER_GROUP(sb), offset);
+++			if (ino < EXT4_INODES_PER_GROUP(sb))
+++				return (group * EXT4_INODES_PER_GROUP(sb) +
+++				       ino + 1);
+++		}
+++	}
+++	return 0;
+++}
+++
++ /* Verify that we are loading a valid orphan from disk */
++ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
++ {
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:09.617666134 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:30.005662279 +0200
++@@ -155,6 +155,12 @@
++ 	u32		ldp_magic;
++ };
++ 
+++/* Only the low 3 bits of ldp_flags are used for the goal policy */
+++typedef enum {
+++	DP_GOAL_POLICY       = 0,
+++	DP_LASTGROUP_REVERSE = 1,
+++} dp_policy_t;
+++
++ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
++ static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
++ static inline unsigned dx_get_hash(struct dx_entry *entry);
++@@ -1802,8 +1808,14 @@
++ 	if (dentry->d_fsdata != NULL) {
++ 		struct lvfs_dentry_params *param = dentry->d_fsdata;
++ 
++-		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC)
++-			inum = param->ldp_inum;
+++		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC) {
+++			if ((dp_policy_t)(param->ldp_flags & 0x7) ==
+++			    DP_LASTGROUP_REVERSE)
+++				inum = ext4_find_reverse(sb);
+++			else /* DP_GOAL_POLICY */
+++				inum = param->ldp_inum;
+++		}
+++
++ 	}
++ 	return inum;
++ }
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:23.325664479 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:30.009672059 +0200
++@@ -1581,6 +1581,7 @@
++ extern struct inode *ext4_new_inode(handle_t *, struct inode *, int,
++ 				    const struct qstr *qstr, __u32 goal);
++ extern void ext4_free_inode(handle_t *, struct inode *);
+++extern unsigned long ext4_find_reverse(struct super_block *);
++ extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
++ extern unsigned long ext4_count_free_inodes(struct super_block *);
++ extern unsigned long ext4_count_dirs(struct super_block *);
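
For illustration, a hedged sketch of how a caller such as Lustre's OSD might request the reverse allocation policy wired in above. The real struct lvfs_dentry_params and LVFS_DENTRY_PARAM_MAGIC come from Lustre's lvfs headers, not this patchset, so the layout shown (only the fields the patch actually reads) is an assumption:

#include <linux/types.h>
#include <linux/dcache.h>

/* Assumed layout; the authoritative definition lives in Lustre's lvfs.h. */
struct lvfs_dentry_params {
	unsigned long	ldp_inum;	/* explicit inode goal (DP_GOAL_POLICY) */
	unsigned long	ldp_flags;	/* low 3 bits select the policy */
	__u32		ldp_magic;	/* LVFS_DENTRY_PARAM_MAGIC */
};

static void request_reverse_alloc(struct dentry *dentry,
				  struct lvfs_dentry_params *param)
{
	param->ldp_magic = LVFS_DENTRY_PARAM_MAGIC;
	param->ldp_flags = 1;	/* DP_LASTGROUP_REVERSE in the patched namei.c */

	/* ext4_add_entry() sees this and calls ext4_find_reverse(), which
	 * scans the block groups from the last one backwards. */
	dentry->d_fsdata = param;
}
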
+diff --git a/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..0025564
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch
+@@ -0,0 +1,54 @@
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:24.113671273 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:27.705663349 +0200
++@@ -1017,9 +1017,47 @@
++ static ssize_t ext4_quota_write(struct super_block *sb, int type,
++ 				const char *data, size_t len, loff_t off);
++ 
+++static int ext4_dquot_initialize(struct inode *inode, int type)
+++{
+++	handle_t *handle;
+++	int ret, err;
+++
+++	/* We may create quota structure so we need to reserve enough blocks */
+++	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
+++	if (IS_ERR(handle))
+++		return PTR_ERR(handle);
+++	ret = dquot_initialize(inode, type);
+++	err = ext4_journal_stop(handle);
+++	if (!ret)
+++		ret = err;
+++	return ret;
+++}
+++
+++static int ext4_dquot_drop(struct inode *inode)
+++{
+++	handle_t *handle;
+++	int ret, err;
+++
+++	/* We may delete quota structure so we need to reserve enough blocks */
+++	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
+++	if (IS_ERR(handle)) {
+++		/*
+++		 * We call dquot_drop() anyway to at least release references
+++		 * to quota structures so that umount does not hang.
+++		 */
+++		dquot_drop(inode);
+++		return PTR_ERR(handle);
+++	}
+++	ret = dquot_drop(inode);
+++	err = ext4_journal_stop(handle);
+++	if (!ret)
+++		ret = err;
+++	return ret;
+++}
+++
++ static const struct dquot_operations ext4_quota_operations = {
++-	.initialize	= dquot_initialize,
++-	.drop		= dquot_drop,
+++	.initialize	= ext4_dquot_initialize,
+++	.drop		= ext4_dquot_drop,
++ 	.alloc_space	= dquot_alloc_space,
++ 	.reserve_space	= dquot_reserve_space,
++ 	.claim_space	= dquot_claim_space,
+diff --git a/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..2ca65a5
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch
+@@ -0,0 +1,57 @@
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:23.337667624 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:26.929665337 +0200
++@@ -72,6 +72,8 @@
++ static int ext4_freeze(struct super_block *sb);
++ 
++ 
+++static int bigendian_extents;
+++
++ ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
++ 			       struct ext4_group_desc *bg)
++ {
++@@ -1107,7 +1109,7 @@
++ 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
++ 	Opt_block_validity, Opt_noblock_validity,
++ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
++-	Opt_mballoc,
+++	Opt_mballoc, Opt_bigendian_extents,
++ 	Opt_discard, Opt_nodiscard,
++ };
++ 
++@@ -1177,6 +1179,7 @@
++ 	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
++ 	{Opt_auto_da_alloc, "auto_da_alloc"},
++ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
+++	{Opt_bigendian_extents, "bigendian_extents"},
++ 	{Opt_mballoc, "mballoc"},
++ 	{Opt_discard, "discard"},
++ 	{Opt_nodiscard, "nodiscard"},
++@@ -1612,6 +1615,9 @@
++ 			else
++ 				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
++ 			break;
+++		case Opt_bigendian_extents:
+++			bigendian_extents = 1;
+++			break;
++ 		case Opt_discard:
++ 			set_opt(sbi->s_mount_opt, DISCARD);
++ 			break;
++@@ -2693,6 +2699,16 @@
++ 		goto failed_mount;
++ 	}
++ 
+++#ifdef __BIG_ENDIAN
+++	if (bigendian_extents == 0) {
+++		printk(KERN_ERR "EXT4-fs: extents feature is not guaranteed to "
+++		       "work on big-endian systems. Use \"bigendian_extents\" "
+++		       "mount option to override.\n");
+++		goto failed_mount;
+++	}
+++#endif
+++
+++
++ #ifdef CONFIG_PROC_FS
++ 	if (ext4_proc_root)
++ 		sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
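
On a big-endian node the check above refuses the mount unless the override is given. A minimal userspace sketch passing it via the mount(2) data string (device and mountpoint are illustrative):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* The last argument is the comma-separated ext4 option string;
	 * "bigendian_extents" is the override added by this patch. */
	if (mount("/dev/sdb1", "/mnt/ost0", "ext4", 0,
		  "bigendian_extents") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
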
+diff --git a/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..77fb42d
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch
+@@ -0,0 +1,154 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:20.317665268 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:24.109662558 +0200
++@@ -827,7 +827,8 @@
++ /*
++  * Mount flags
++  */
++-#define EXT4_MOUNT_OLDALLOC		0x00002  /* Don't use the new Orlov allocator */
+++#define EXT4_MOUNT_NO_MBCACHE		0x00001 /* Disable mbcache */
+++#define EXT4_MOUNT_OLDALLOC		0x00002 /* Don't use the new Orlov allocator */
++ #define EXT4_MOUNT_GRPID		0x00004	/* Create files with directory's group */
++ #define EXT4_MOUNT_DEBUG		0x00008	/* Some debugging messages */
++ #define EXT4_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:54.885674648 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:24.113671273 +0200
++@@ -1118,6 +1118,7 @@
++ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
++ 	Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
++ 	Opt_extents, Opt_noextents,
+++	Opt_no_mbcache,
++ 	Opt_discard, Opt_nodiscard,
++ };
++ 
++@@ -1190,6 +1191,7 @@
++ 	{Opt_bigendian_extents, "bigendian_extents"},
++ 	{Opt_force_over_128tb, "force_over_128tb"},
++ 	{Opt_mballoc, "mballoc"},
+++	{Opt_no_mbcache, "no_mbcache"},
++ 	{Opt_extents, "extents"},
++ 	{Opt_noextents, "noextents"},
++ 	{Opt_discard, "discard"},
++@@ -1667,6 +1669,9 @@
++ 			}
++ 			clear_opt(sbi->s_mount_opt, EXTENTS);
++ 			break;
+++		case Opt_no_mbcache:
+++			set_opt(sbi->s_mount_opt, NO_MBCACHE);
+++			break;
++ 		default:
++ 			ext4_msg(sb, KERN_ERR,
++ 			       "Unrecognized mount option \"%s\" "
++Index: linux-source-2.6.32/fs/ext4/xattr.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:11:20.321664768 +0200
+++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:24.117664822 +0200
++@@ -86,7 +86,8 @@
++ # define ea_bdebug(f...)
++ #endif
++ 
++-static void ext4_xattr_cache_insert(struct buffer_head *);
+++static void ext4_xattr_cache_insert(struct super_block *,
+++				    struct buffer_head *);
++ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
++ 						 struct ext4_xattr_header *,
++ 						 struct mb_cache_entry **);
++@@ -332,7 +333,7 @@
++ 		error = -EIO;
++ 		goto cleanup;
++ 	}
++-	ext4_xattr_cache_insert(bh);
+++	ext4_xattr_cache_insert(inode->i_sb, bh);
++ 	entry = BFIRST(bh);
++ 	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
++ 				      inode);
++@@ -491,7 +492,7 @@
++ 		error = -EIO;
++ 		goto cleanup;
++ 	}
++-	ext4_xattr_cache_insert(bh);
+++	ext4_xattr_cache_insert(inode->i_sb, bh);
++ 	error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
++ 
++ cleanup:
++@@ -588,7 +589,9 @@
++ 	struct mb_cache_entry *ce = NULL;
++ 	int error = 0;
++ 
++-	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+++	if (!test_opt(inode->i_sb, NO_MBCACHE))
+++		ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev,
+++					bh->b_blocknr);
++ 	error = ext4_journal_get_write_access(handle, bh);
++ 	if (error)
++ 		goto out;
++@@ -987,8 +990,10 @@
++ #define header(x) ((struct ext4_xattr_header *)(x))
++ 
++ 	if (s->base) {
++-		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
++-					bs->bh->b_blocknr);
+++		if (!test_opt(inode->i_sb, NO_MBCACHE))
+++			ce = mb_cache_entry_get(ext4_xattr_cache,
+++						bs->bh->b_bdev,
+++						bs->bh->b_blocknr);
++ 		error = ext4_journal_get_write_access(handle, bs->bh);
++ 		if (error)
++ 			goto cleanup;
++@@ -1005,7 +1010,7 @@
++ 				if (!IS_LAST_ENTRY(s->first))
++ 					ext4_xattr_rehash(header(s->base),
++ 							  s->here);
++-				ext4_xattr_cache_insert(bs->bh);
+++				ext4_xattr_cache_insert(sb, bs->bh);
++ 			}
++ 			unlock_buffer(bs->bh);
++ 			if (error == -EIO)
++@@ -1088,7 +1093,8 @@
++ 				if (error)
++ 					goto cleanup_dquot;
++ 			}
++-			mb_cache_entry_release(ce);
+++			if (ce)
+++				mb_cache_entry_release(ce);
++ 			ce = NULL;
++ 		} else if (bs->bh && s->base == bs->bh->b_data) {
++ 			/* We were modifying this block in-place. */
++@@ -1132,7 +1138,7 @@
++ 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
++ 			set_buffer_uptodate(new_bh);
++ 			unlock_buffer(new_bh);
++-			ext4_xattr_cache_insert(new_bh);
+++			ext4_xattr_cache_insert(sb, new_bh);
++ 			error = ext4_handle_dirty_metadata(handle,
++ 							   inode, new_bh);
++ 			if (error)
++@@ -1754,12 +1760,15 @@
++  * Returns 0, or a negative error number on failure.
++  */
++ static void
++-ext4_xattr_cache_insert(struct buffer_head *bh)
+++ext4_xattr_cache_insert(struct super_block *sb, struct buffer_head *bh)
++ {
++ 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
++ 	struct mb_cache_entry *ce;
++ 	int error;
++ 
+++	if (test_opt(sb, NO_MBCACHE))
+++		return;
+++
++ 	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
++ 	if (!ce) {
++ 		ea_bdebug(bh, "out of memory");
++@@ -1832,6 +1841,8 @@
++ 	__u32 hash = le32_to_cpu(header->h_hash);
++ 	struct mb_cache_entry *ce;
++ 
+++	if (test_opt(inode->i_sb, NO_MBCACHE))
+++		return NULL;
++ 	if (!header->h_hash)
++ 		return NULL;  /* never share */
++ 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+diff --git a/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..07fad0b
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch
+@@ -0,0 +1,350 @@
++Index: linux-source-2.6.32/fs/ext4/dynlocks.c
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/fs/ext4/dynlocks.c	2012-06-28 12:10:45.425668386 +0200
++@@ -0,0 +1,236 @@
+++/*
+++ * Dynamic Locks
+++ *
+++ * struct dynlock is a lockspace;
+++ * one may request a lock (exclusive or shared) for some value
+++ * in that lockspace
+++ *
+++ */
+++
+++#include <linux/dynlocks.h>
+++#include <linux/module.h>
+++#include <linux/slab.h>
+++#include <linux/sched.h>
+++
+++#define DYNLOCK_HANDLE_MAGIC	0xd19a10c
+++#define DYNLOCK_HANDLE_DEAD	0xd1956ee
+++#define DYNLOCK_LIST_MAGIC	0x11ee91e6
+++
+++static struct kmem_cache * dynlock_cachep = NULL;
+++
+++struct dynlock_handle {
+++	unsigned 		dh_magic;
+++	struct list_head	dh_list;
+++	unsigned long		dh_value;	/* lock value */
+++	int			dh_refcount;	/* number of users */
+++	int			dh_readers;
+++	int			dh_writers;
+++	int			dh_pid;		/* holder of the lock */
+++	wait_queue_head_t	dh_wait;
+++};
+++
+++int __init dynlock_cache_init(void)
+++{
+++	int rc = 0;
+++
+++	/* printk(KERN_INFO "init dynlocks cache\n"); */
+++	dynlock_cachep = kmem_cache_create("dynlock_cache",
+++					 sizeof(struct dynlock_handle),
+++					 0,
+++					 SLAB_HWCACHE_ALIGN,
+++					 NULL);
+++	if (dynlock_cachep == NULL) {
+++		printk(KERN_ERR "Not able to create dynlock cache\n");
+++		rc = -ENOMEM;
+++	}
+++	return rc;
+++}
+++
+++void dynlock_cache_exit(void)
+++{
+++	/* printk(KERN_INFO "exit dynlocks cache\n"); */
+++	kmem_cache_destroy(dynlock_cachep);
+++}
+++
+++/*
+++ * dynlock_init
+++ *
+++ * initialize lockspace
+++ *
+++ */
+++void dynlock_init(struct dynlock *dl)
+++{
+++	spin_lock_init(&dl->dl_list_lock);
+++	INIT_LIST_HEAD(&dl->dl_list);
+++	dl->dl_magic = DYNLOCK_LIST_MAGIC;
+++}
+++EXPORT_SYMBOL(dynlock_init);
+++
+++/*
+++ * dynlock_lock
+++ *
+++ * acquires a lock (exclusive or shared) in the specified lockspace;
+++ * each lock in the lockspace is allocated separately, so the caller
+++ * has to specify GFP flags.
+++ * the routine returns a pointer to the lock; this pointer is intended
+++ * to be passed to dynlock_unlock()
+++ *
+++ */
+++struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+++				    enum dynlock_type lt, gfp_t gfp)
+++{
+++	struct dynlock_handle *nhl = NULL;
+++	struct dynlock_handle *hl;
+++
+++	BUG_ON(dl == NULL);
+++	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+++
+++repeat:
+++	/* find requested lock in lockspace */
+++	spin_lock(&dl->dl_list_lock);
+++	BUG_ON(dl->dl_list.next == NULL);
+++	BUG_ON(dl->dl_list.prev == NULL);
+++	list_for_each_entry(hl, &dl->dl_list, dh_list) {
+++		BUG_ON(hl->dh_list.next == NULL);
+++		BUG_ON(hl->dh_list.prev == NULL);
+++		BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+++		if (hl->dh_value == value) {
+++			/* lock is found */
+++			if (nhl) {
+++				/* someone else allocated the lock we
+++				 * were looking for while we were
+++				 * creating ours, so drop our copy
+++				 */
+++				kmem_cache_free(dynlock_cachep, nhl);
+++				nhl = NULL;
+++			}
+++			hl->dh_refcount++;
+++			goto found;
+++		}
+++	}
+++	/* lock not found */
+++	if (nhl) {
+++		/* we already have allocated lock. use it */
+++		hl = nhl;
+++		nhl = NULL;
+++		list_add(&hl->dh_list, &dl->dl_list);
+++		goto found;
+++	}
+++	spin_unlock(&dl->dl_list_lock);
+++
+++	/* lock not found and we haven't allocated lock yet. allocate it */
+++	nhl = kmem_cache_alloc(dynlock_cachep, gfp);
+++	if (nhl == NULL)
+++		return NULL;
+++	nhl->dh_refcount = 1;
+++	nhl->dh_value = value;
+++	nhl->dh_readers = 0;
+++	nhl->dh_writers = 0;
+++	nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
+++	init_waitqueue_head(&nhl->dh_wait);
+++
+++	/* while the lock was being allocated, someone else may have
+++	 * allocated it and put it onto the list; check for this
+++	 */
+++	goto repeat;
+++
+++found:
+++	if (lt == DLT_WRITE) {
+++		/* exclusive lock: the user doesn't want to share the lock at all
+++		 * NOTE: one process may take the same lock several times;
+++		 * this functionality is useful for rename operations */
+++		while ((hl->dh_writers && hl->dh_pid != current->pid) ||
+++				hl->dh_readers) {
+++			spin_unlock(&dl->dl_list_lock);
+++			wait_event(hl->dh_wait,
+++				hl->dh_writers == 0 && hl->dh_readers == 0);
+++			spin_lock(&dl->dl_list_lock);
+++		}
+++		hl->dh_writers++;
+++	} else {
+++		/* shared lock: the user does not want to share the lock with a writer */
+++		while (hl->dh_writers) {
+++			spin_unlock(&dl->dl_list_lock);
+++			wait_event(hl->dh_wait, hl->dh_writers == 0);
+++			spin_lock(&dl->dl_list_lock);
+++		}
+++		hl->dh_readers++;
+++	}
+++	hl->dh_pid = current->pid;
+++	spin_unlock(&dl->dl_list_lock);
+++
+++	return hl;
+++}
+++EXPORT_SYMBOL(dynlock_lock);
+++
+++
+++/*
+++ * dynlock_unlock
+++ *
+++ * the caller has to specify the lockspace (dl) and the pointer to the
+++ * lock structure returned by dynlock_lock()
+++ *
+++ */
+++void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
+++{
+++	int wakeup = 0;
+++
+++	BUG_ON(dl == NULL);
+++	BUG_ON(hl == NULL);
+++	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+++
+++	if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
+++		printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
+++
+++	BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+++	BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
+++
+++	spin_lock(&dl->dl_list_lock);
+++	if (hl->dh_writers) {
+++		BUG_ON(hl->dh_readers != 0);
+++		hl->dh_writers--;
+++		if (hl->dh_writers == 0)
+++			wakeup = 1;
+++	} else if (hl->dh_readers) {
+++		hl->dh_readers--;
+++		if (hl->dh_readers == 0)
+++			wakeup = 1;
+++	} else {
+++		BUG();
+++	}
+++	if (wakeup) {
+++		hl->dh_pid = 0;
+++		wake_up(&hl->dh_wait);
+++	}
+++	if (--(hl->dh_refcount) == 0) {
+++		hl->dh_magic = DYNLOCK_HANDLE_DEAD;
+++		list_del(&hl->dh_list);
+++		kmem_cache_free(dynlock_cachep, hl);
+++	}
+++	spin_unlock(&dl->dl_list_lock);
+++}
+++EXPORT_SYMBOL(dynlock_unlock);
+++
+++int dynlock_is_locked(struct dynlock *dl, unsigned long value)
+++{
+++	struct dynlock_handle *hl;
+++	int result = 0;
+++
+++	/* find requested lock in lockspace */
+++	spin_lock(&dl->dl_list_lock);
+++	BUG_ON(dl->dl_list.next == NULL);
+++	BUG_ON(dl->dl_list.prev == NULL);
+++	list_for_each_entry(hl, &dl->dl_list, dh_list) {
+++		BUG_ON(hl->dh_list.next == NULL);
+++		BUG_ON(hl->dh_list.prev == NULL);
+++		BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+++		if (hl->dh_value == value && hl->dh_pid == current->pid) {
+++			/* lock is found */
+++			result = 1;
+++			break;
+++		}
+++	}
+++	spin_unlock(&dl->dl_list_lock);
+++	return result;
+++}
+++EXPORT_SYMBOL(dynlock_is_locked);
++Index: linux-source-2.6.32/include/linux/dynlocks.h
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/include/linux/dynlocks.h	2012-06-28 12:10:45.425668386 +0200
++@@ -0,0 +1,34 @@
+++#ifndef _LINUX_DYNLOCKS_H
+++#define _LINUX_DYNLOCKS_H
+++
+++#include <linux/list.h>
+++#include <linux/wait.h>
+++
+++struct dynlock_handle;
+++
+++/*
+++ * lock's namespace:
+++ *   - list of locks
+++ *   - lock to protect this list
+++ */
+++struct dynlock {
+++	unsigned		dl_magic;
+++	struct list_head	dl_list;
+++	spinlock_t		dl_list_lock;
+++};
+++
+++enum dynlock_type {
+++	DLT_WRITE,
+++	DLT_READ
+++};
+++
+++int dynlock_cache_init(void);
+++void dynlock_cache_exit(void);
+++void dynlock_init(struct dynlock *dl);
+++struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+++				    enum dynlock_type lt, gfp_t gfp);
+++void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
+++int dynlock_is_locked(struct dynlock *dl, unsigned long value);
+++
+++#endif
+++
++Index: linux-source-2.6.32/fs/ext4/Makefile
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:09:59.685666701 +0200
+++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:10:45.425668386 +0200
++@@ -7,7 +7,7 @@
++ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
++ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
++ 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
++-		mmp.o
+++		mmp.o dynlocks.o
++ 
++ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
++ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:42.285668861 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:45.429666724 +0200
++@@ -4082,32 +4082,37 @@
++ 		return err;
++ 	ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
++ 	if (!ext4_kset)
++-		goto out4;
+++		goto out5;
++ 	ext4_proc_root = proc_mkdir("fs/ext4", NULL);
++ 	err = init_ext4_mballoc();
++ 	if (err)
++-		goto out3;
+++		goto out4;
++ 
++ 	err = init_ext4_xattr();
++ 	if (err)
++-		goto out2;
+++		goto out3;
++ 	err = init_inodecache();
++ 	if (err)
+++		goto out2;
+++	err = dynlock_cache_init();
+++	if (err)
++ 		goto out1;
++ 	err = register_filesystem(&ext4_fs_type);
++ 	if (err)
++ 		goto out;
++ 	return 0;
++ out:
++-	destroy_inodecache();
+++	dynlock_cache_exit();
++ out1:
++-	exit_ext4_xattr();
+++	destroy_inodecache();
++ out2:
++-	exit_ext4_mballoc();
+++	exit_ext4_xattr();
++ out3:
+++	exit_ext4_mballoc();
+++out4:
++ 	remove_proc_entry("fs/ext4", NULL);
++ 	kset_unregister(ext4_kset);
++-out4:
+++out5:
++ 	exit_ext4_system_zone();
++ 	return err;
++ }
++@@ -4115,6 +4120,7 @@
++ static void __exit exit_ext4_fs(void)
++ {
++ 	unregister_filesystem(&ext4_fs_type);
+++	dynlock_cache_exit();
++ 	destroy_inodecache();
++ 	exit_ext4_xattr();
++ 	exit_ext4_mballoc();
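
For illustration, a hedged sketch of how another kernel-side user might drive the dynlock API added above, once dynlock_cache_init() has run; the lockspace and value names are invented:

#include <linux/dynlocks.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct dynlock dir_lockspace;

static void example_setup(void)
{
	/* one-time initialisation of the lockspace */
	dynlock_init(&dir_lockspace);
}

static int locked_update(unsigned long hash_value)
{
	struct dynlock_handle *h;

	/* exclusive lock on one value; other values in the same
	 * lockspace can still be locked in parallel */
	h = dynlock_lock(&dir_lockspace, hash_value, DLT_WRITE, GFP_NOFS);
	if (h == NULL)
		return -ENOMEM;

	/* ... modify the object identified by hash_value ... */

	dynlock_unlock(&dir_lockspace, h);
	return 0;
}
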
+diff --git a/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..6745d2f
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch
+@@ -0,0 +1,134 @@
++Index: linux-source-2.6.32/fs/ext4/dir.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/dir.c	2012-06-28 12:11:16.361665139 +0200
+++++ linux-source-2.6.32/fs/ext4/dir.c	2012-06-28 12:11:34.597665360 +0200
++@@ -246,22 +246,50 @@
++ 	return ret;
++ }
++ 
+++static inline int is_32bit_api(void)
+++{
+++#ifdef HAVE_IS_COMPAT_TASK
+++        return is_compat_task();
+++#else
+++        return (BITS_PER_LONG == 32);
+++#endif
+++}
+++
++ /*
++  * These functions convert from the major/minor hash to an f_pos
++  * value.
++  *
++- * Currently we only use major hash numer.  This is unfortunate, but
++- * on 32-bit machines, the same VFS interface is used for lseek and
++- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
++- * lseek/telldir/seekdir will blow out spectacularly, and from within
++- * the ext2 low-level routine, we don't know if we're being called by
++- * a 64-bit version of the system call or the 32-bit version of the
++- * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
++- * cookie.  Sigh.
+++ * The upper layer (OSD) should specify O_32BITHASH or O_64BITHASH explicitly.
+++ * On the other hand, we allow ldiskfs to be mounted directly on both 32-bit
+++ * and 64-bit nodes, in which case neither O_32BITHASH nor O_64BITHASH is
+++ * specified.
++  */
++-#define hash2pos(major, minor)	(major >> 1)
++-#define pos2maj_hash(pos)	((pos << 1) & 0xffffffff)
++-#define pos2min_hash(pos)	(0)
+++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+++{
+++	if ((filp->f_flags & O_32BITHASH) ||
+++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
+++		return (major >> 1);
+++	else
+++		return (((__u64)(major >> 1) << 32) | (__u64)minor);
+++}
+++
+++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+++{
+++	if ((filp->f_flags & O_32BITHASH) ||
+++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
+++		return ((pos << 1) & 0xffffffff);
+++	else
+++		return (((pos >> 32) << 1) & 0xffffffff);
+++}
+++
+++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+++{
+++	if ((filp->f_flags & O_32BITHASH) ||
+++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
+++		return (0);
+++	else
+++		return (pos & 0xffffffff);
+++}
++ 
++ /*
++  * This structure holds the nodes of the red-black tree used to store
++@@ -322,15 +350,16 @@
++ }
++ 
++ 
++-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
+++static struct dir_private_info *
+++ext4_htree_create_dir_info(struct file *filp, loff_t pos)
++ {
++ 	struct dir_private_info *p;
++ 
++ 	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
++ 	if (!p)
++ 		return NULL;
++-	p->curr_hash = pos2maj_hash(pos);
++-	p->curr_minor_hash = pos2min_hash(pos);
+++	p->curr_hash = pos2maj_hash(filp, pos);
+++	p->curr_minor_hash = pos2min_hash(filp, pos);
++ 	return p;
++ }
++ 
++@@ -426,7 +455,7 @@
++ 		       "null fname?!?\n");
++ 		return 0;
++ 	}
++-	curr_pos = hash2pos(fname->hash, fname->minor_hash);
+++	curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
++ 	while (fname) {
++ 		error = filldir(dirent, fname->name,
++ 				fname->name_len, curr_pos,
++@@ -451,7 +480,7 @@
++ 	int	ret;
++ 
++ 	if (!info) {
++-		info = ext4_htree_create_dir_info(filp->f_pos);
+++		info = ext4_htree_create_dir_info(filp, filp->f_pos);
++ 		if (!info)
++ 			return -ENOMEM;
++ 		filp->private_data = info;
++@@ -465,8 +494,8 @@
++ 		free_rb_tree_fname(&info->root);
++ 		info->curr_node = NULL;
++ 		info->extra_fname = NULL;
++-		info->curr_hash = pos2maj_hash(filp->f_pos);
++-		info->curr_minor_hash = pos2min_hash(filp->f_pos);
+++		info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+++		info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
++ 	}
++ 
++ 	/*
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:24.109662558 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:34.597665360 +0200
++@@ -808,6 +808,14 @@
++ 	__u64 i_fs_version;
++ };
++ 
+++#ifndef O_32BITHASH
+++# define O_32BITHASH	0x10000000
+++#endif
+++
+++#ifndef O_64BITHASH
+++# define O_64BITHASH	0x20000000
+++#endif
+++
++ #define HAVE_DISK_INODE_VERSION
++ 
++ /*
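
For illustration, a userspace sketch that requests full 64-bit readdir cookies from the patched ldiskfs by opening a directory with O_64BITHASH; the fallback flag value matches the one the patch defines when the kernel headers do not provide it:

#include <fcntl.h>
#include <stdio.h>

#ifndef O_64BITHASH
#define O_64BITHASH 0x20000000	/* value used by the patch above */
#endif

int main(int argc, char **argv)
{
	/* With O_64BITHASH, f_pos for this directory carries both the
	 * major and the minor hash instead of the major hash only. */
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_64BITHASH);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	return 0;
}
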
+diff --git a/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..fc65734
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch
+@@ -0,0 +1,48 @@
++Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:08:35.493670779 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:09:30.485668675 +0200
++@@ -194,6 +194,11 @@
++ 	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
++ }
++ 
+++static inline void ext4_ext_tree_changed(struct inode *inode)
+++{
+++	EXT4_I(inode)->i_ext_generation++;
+++}
+++
++ static inline void
++ ext4_ext_invalidate_cache(struct inode *inode)
++ {
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:19.417666703 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:30.485668675 +0200
++@@ -756,6 +756,7 @@
++ 	struct inode vfs_inode;
++ 	struct jbd2_inode jinode;
++ 
+++	unsigned long i_ext_generation;
++ 	struct ext4_ext_cache i_cached_extent;
++ 	/*
++ 	 * File creation time. Its function is same as that of
++Index: linux-source-2.6.32/fs/ext4/extents.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:08:35.401666340 +0200
+++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:09:30.489673456 +0200
++@@ -1752,6 +1752,7 @@
++ 		ext4_ext_drop_refs(npath);
++ 		kfree(npath);
++ 	}
+++	ext4_ext_tree_changed(inode);
++ 	ext4_ext_invalidate_cache(inode);
++ 	return err;
++ }
++@@ -2380,6 +2381,7 @@
++ 		}
++ 	}
++ out:
+++	ext4_ext_tree_changed(inode);
++ 	ext4_ext_drop_refs(path);
++ 	kfree(path);
++ 	if (err == -EAGAIN)
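
The new counter gives callers a cheap way to detect that the extent tree changed between two lookups. A hedged consumer-side sketch (struct my_cached_extent is invented; only i_ext_generation comes from the patch):

struct my_cached_extent {
	unsigned long	gen;	/* i_ext_generation at fill time */
	ext4_lblk_t	lblk;	/* cached logical start */
	ext4_fsblk_t	pblk;	/* cached physical start */
	unsigned int	len;
};

static int cached_extent_valid(struct inode *inode,
			       struct my_cached_extent *ce)
{
	/* every insert/remove in the tree bumps the generation, so a
	 * mismatch means the cached mapping may be stale */
	return ce->gen == EXT4_I(inode)->i_ext_generation;
}
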
+diff --git a/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..c7f1de8
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch
+@@ -0,0 +1,168 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:42.265665313 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:54.877664607 +0200
++@@ -849,6 +849,7 @@
++ #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
++ #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
++ #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
+++#define EXT4_MOUNT_EXTENTS		0x400000 /* Extents support */
++ #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
++ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
++ #define EXT4_MOUNT_I_VERSION            0x2000000 /* i_version support */
++Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:23.325664479 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:54.877664607 +0200
++@@ -33,7 +33,7 @@
++ 
++ #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
++ 	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
++-	 ? 27U : 8U)
+++	 || test_opt(sb, EXTENTS) ? 27U : 8U)
++ 
++ #define ext4_journal_dirty_metadata(handle, bh)  \
++ 		ext4_handle_dirty_metadata(handle, NULL, bh)
++Index: linux-source-2.6.32/fs/ext4/extents.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:10:23.329669991 +0200
+++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:10:54.881664295 +0200
++@@ -2449,7 +2449,7 @@
++ 	 * possible initialization would be here
++ 	 */
++ 
++-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+++	if (test_opt(sb, EXTENTS)) {
++ #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
++ 		printk(KERN_INFO "EXT4-fs: file extents enabled");
++ #ifdef AGGRESSIVE_TEST
++@@ -2476,7 +2476,7 @@
++  */
++ void ext4_ext_release(struct super_block *sb)
++ {
++-	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+++	if (!test_opt(sb, EXTENTS))
++ 		return;
++ 
++ #ifdef EXTENTS_STATS
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:10:30.005662279 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:54.881664295 +0200
++@@ -1047,7 +1047,7 @@
++ 	if (err)
++ 		goto fail_free_drop;
++ 
++-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+++	if (test_opt(sb, EXTENTS)) {
++ 		/* set extent flag only for directory, file and normal symlink*/
++ 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
++ 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
++Index: linux-source-2.6.32/fs/ext4/migrate.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/migrate.c	2012-06-28 12:08:18.565662551 +0200
+++++ linux-source-2.6.32/fs/ext4/migrate.c	2012-06-28 12:10:54.881664295 +0200
++@@ -459,13 +459,10 @@
++ 	unsigned long max_entries;
++ 	__u32 goal;
++ 
++-	/*
++-	 * If the filesystem does not support extents, or the inode
++-	 * already is extent-based, error out.
++-	 */
++-	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
++-				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
++-	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+++	if (!test_opt(inode->i_sb, EXTENTS))
+++		/*
+++		 * if mounted with noextents we don't allow the migration
+++		 */
++ 		return -EINVAL;
++ 
++ 	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:45.429666724 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:54.885674648 +0200
++@@ -895,6 +895,8 @@
++ 		seq_puts(seq, ",journal_checksum");
++ 	if (test_opt(sb, NOBH))
++ 		seq_puts(seq, ",nobh");
+++	if (!test_opt(sb, EXTENTS))
+++		seq_puts(seq, ",noextents");
++ 	if (test_opt(sb, I_VERSION))
++ 		seq_puts(seq, ",i_version");
++ 	if (!test_opt(sb, DELALLOC))
++@@ -1115,6 +1117,7 @@
++ 	Opt_block_validity, Opt_noblock_validity,
++ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
++ 	Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
+++	Opt_extents, Opt_noextents,
++ 	Opt_discard, Opt_nodiscard,
++ };
++ 
++@@ -1187,6 +1190,8 @@
++ 	{Opt_bigendian_extents, "bigendian_extents"},
++ 	{Opt_force_over_128tb, "force_over_128tb"},
++ 	{Opt_mballoc, "mballoc"},
+++	{Opt_extents, "extents"},
+++	{Opt_noextents, "noextents"},
++ 	{Opt_discard, "discard"},
++ 	{Opt_nodiscard, "nodiscard"},
++ 	{Opt_err, NULL},
++@@ -1231,6 +1236,7 @@
++ 	int qtype, qfmt;
++ 	char *qname;
++ #endif
+++	ext4_fsblk_t last_block;
++ 
++ 	if (!options)
++ 		return 1;
++@@ -1635,6 +1641,32 @@
++ 		case Opt_force_over_128tb:
++ 			force_over_128tb = 1;
++ 			break;
+++		case Opt_extents:
+++			if (!EXT4_HAS_INCOMPAT_FEATURE(sb,
+++					EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+++				ext4_warning(sb, "extents feature not enabled "
+++						 "on this filesystem, use tune2fs");
+++				return 0;
+++			}
+++			set_opt(sbi->s_mount_opt, EXTENTS);
+++			break;
+++		case Opt_noextents:
+++			/*
+++			 * When e2fsprogs supports resizing an already existing
+++			 * ext4 file system beyond 2**32 blocks, the block
+++			 * allocator will need support for growing existing
+++			 * block-mapped inodes so that the blocks allocated
+++			 * for them still fall within 2**32
+++			 */
+++			last_block = ext4_blocks_count(sbi->s_es) - 1;
+++			if (last_block  > 0xffffffffULL) {
+++				printk(KERN_ERR "EXT4-fs: Filesystem too "
+++						"large to mount with "
+++						"-o noextents options\n");
+++				return 0;
+++			}
+++			clear_opt(sbi->s_mount_opt, EXTENTS);
+++			break;
++ 		default:
++ 			ext4_msg(sb, KERN_ERR,
++ 			       "Unrecognized mount option \"%s\" "
++@@ -2499,6 +2531,14 @@
++ 	set_opt(sbi->s_mount_opt, BARRIER);
++ 
++ 	/*
+++	 * turn on extents feature by default in ext4 filesystem
+++	 * only if feature flag already set by mkfs or tune2fs.
+++	 * Use -o noextents to turn it off
+++	 */
+++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+++		set_opt(sbi->s_mount_opt, EXTENTS);
+++
+++	/*
++ 	 * enable delayed allocation by default
++ 	 * Use -o nodelalloc to turn it off
++ 	 */
+diff --git a/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..c462357
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch
+@@ -0,0 +1,111 @@
++This patch adds direct EXT4_IOC_FIEMAP support to ldiskfs, for Lustre to call
++without having to go through do_vfs_ioctl() (which isn't exported, and has a
++number of other ioctls which are not suitable for Lustre). The actual FIEMAP
++support is already present in the kernel's ext4 code for normal usage.
++
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:54.877664607 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:58.337671542 +0200
++@@ -473,7 +473,7 @@
++ #define EXT4_IOC_GROUP_ADD		_IOW('f', 8, struct ext4_new_group_input)
++ #define EXT4_IOC_MIGRATE		_IO('f', 9)
++  /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
++- /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
+++#define EXT4_IOC_FIEMAP			_IOWR('f', 11, struct fiemap)
++ #define EXT4_IOC_ALLOC_DA_BLKS		_IO('f', 12)
++ #define EXT4_IOC_MOVE_EXT		_IOWR('f', 15, struct move_extent)
++ 
++Index: linux-source-2.6.32/fs/ext4/ioctl.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ioctl.c	2012-06-28 12:08:17.325666867 +0200
+++++ linux-source-2.6.32/fs/ext4/ioctl.c	2012-06-28 12:10:58.337671542 +0200
++@@ -18,6 +18,71 @@
++ #include "ext4_jbd2.h"
++ #include "ext4.h"
++ 
+++/* So that the fiemap access checks can't overflow on 32 bit machines. */
+++#define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
+++
+++static int fiemap_check_ranges(struct super_block *sb,
+++			       u64 start, u64 len, u64 *new_len)
+++{
+++	*new_len = len;
+++
+++	if (len == 0)
+++		return -EINVAL;
+++
+++	if (start > sb->s_maxbytes)
+++		return -EFBIG;
+++
+++	/*
+++	 * Shrink request scope to what the fs can actually handle.
+++	 */
+++	if ((len > sb->s_maxbytes) ||
+++	    (sb->s_maxbytes - len) < start)
+++		*new_len = sb->s_maxbytes - start;
+++
+++	return 0;
+++}
+++
+++int ioctl_fiemap(struct inode *inode, struct file *filp, unsigned long arg)
+++{
+++	struct fiemap fiemap;
+++	u64 len;
+++	struct fiemap_extent_info fieinfo = {0, };
+++	struct super_block *sb = inode->i_sb;
+++	int error = 0;
+++
+++	if (copy_from_user(&fiemap, (struct fiemap __user *) arg,
+++			   sizeof(struct fiemap)))
+++		 return -EFAULT;
+++
+++	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
+++		return -EINVAL;
+++
+++	error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
+++				    &len);
+++	if (error)
+++		return error;
+++
+++	fieinfo.fi_flags = fiemap.fm_flags;
+++	fieinfo.fi_extents_max = fiemap.fm_extent_count;
+++	fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
+++
+++	if (fiemap.fm_extent_count != 0 &&
+++	    !access_ok(VERIFY_WRITE, (void *)arg,
+++		       offsetof(typeof(fiemap), fm_extents[fiemap.fm_extent_count])))
+++		return -EFAULT;
+++
+++	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
+++		filemap_write_and_wait(inode->i_mapping);
+++
+++	error = ext4_fiemap(inode, &fieinfo, fiemap.fm_start, len);
+++	fiemap.fm_flags = fieinfo.fi_flags;
+++	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
+++	if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
+++		error = -EFAULT;
+++
+++	return error;
+++}
+++
++ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++ {
++ 	struct inode *inode = filp->f_dentry->d_inode;
++@@ -330,6 +395,9 @@
++ 		mnt_drop_write(filp->f_path.mnt);
++ 		return err;
++ 	}
+++	case EXT4_IOC_FIEMAP: {
+++		return ioctl_fiemap(inode, filp, arg);
+++	}
++ 
++ 	default:
++ 		return -ENOTTY;
++Index: linux-source-2.6.32/fs/ext4/fiemap.h
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/fs/ext4/fiemap.h	2012-06-28 12:10:58.337671542 +0200
++@@ -0,0 +1,2 @@
+++
+++#include_next <fiemap.h>
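
For illustration, a userspace sketch driving the new ioctl directly; the EXT4_IOC_FIEMAP encoding matches the definition above, and the request/reply layout is the standard struct fiemap from <linux/fiemap.h>:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fiemap.h>

#define EXT4_IOC_FIEMAP _IOWR('f', 11, struct fiemap)

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Room for 32 extents directly after the request header. */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm) {
		perror("calloc");
		return 1;
	}
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;	/* whole file; the kernel clamps it */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 32;

	if (ioctl(fd, EXT4_IOC_FIEMAP, fm) < 0) {
		perror("ioctl(EXT4_IOC_FIEMAP)");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: logical %llu physical %llu length %llu\n",
		       i,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length);
	return 0;
}
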
+diff --git a/ldiskfs/kernel_patches/patches/ext4-force_over_128tb-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-force_over_128tb-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..487b2cc
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-force_over_128tb-2.6.32-vanilla.patch
+@@ -0,0 +1,56 @@
++Index: linux-2.6.18-164.6.1/fs/ext4/super.c
++===================================================================
++--- linux-2.6.18-164.6.1.orig/fs/ext4/super.c
+++++ linux-2.6.18-164.6.1/fs/ext4/super.c
++@@ -51,6 +51,8 @@
++ 
++ struct proc_dir_entry *ext4_proc_root;
++ 
+++static int force_over_128tb;
+++
++ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
++ 			     unsigned long journal_devnum);
++ static int ext4_commit_super(struct super_block *sb,
++@@ -1343,6 +1345,7 @@ enum {
++ 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
++ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
++ 	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv, Opt_bigendian_extents,
+++	Opt_force_over_128tb,
++ };
++ 
++ static match_table_t tokens = {
++@@ -1410,6 +1413,7 @@ static match_table_t tokens = {
++ 	{Opt_auto_da_alloc, "auto_da_alloc"},
++ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
++ 	{Opt_bigendian_extents, "bigendian_extents"},
+++	{Opt_force_over_128tb, "force_over_128tb"},
++ 	{Opt_err, NULL},
++ };
++ 
++@@ -1837,6 +1841,9 @@ set_qf_format:
++ 			break;
++ 		case Opt_mballoc:
++ 			break;
+++		case Opt_force_over_128tb:
+++			force_over_128tb = 1;
+++			break;
++ 		default:
++ 			printk(KERN_ERR
++ 			       "EXT4-fs: Unrecognized mount option \"%s\" "
++@@ -2692,6 +2699,16 @@ static int ext4_fill_super(struct super_
++ 		goto failed_mount;
++ 	}
++ 
+++	if (ext4_blocks_count(es) > (8ULL << 32)) {
+++		if (force_over_128tb == 0) {
+++			printk(KERN_ERR "EXT4-fs does not support filesystems "
+++			       "greater than 128TB and can cause data corruption."
+++			       "Use \"force_over_128tb\" mount option to override."
+++			       "\n");
+++			goto failed_mount;
+++		}
+++	}
+++
++ 	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
++ 		goto cantfind_ext4;
++ 
+diff --git a/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..8ff4099
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch
+@@ -0,0 +1,87 @@
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:42.283981062 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:49.061664012 +0200
++@@ -1526,6 +1526,72 @@
++ 	return retval;
++ }
++ 
+++/* update ".." for hash-indexed directory, split the item "." if necessary */
+++static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
+++				 struct inode *inode)
+++{
+++	struct inode * dir = dentry->d_parent->d_inode;
+++	struct buffer_head * dir_block;
+++	struct ext4_dir_entry_2 * de;
+++	int len, journal = 0, err = 0;
+++
+++	if (IS_ERR(handle))
+++		return PTR_ERR(handle);
+++
+++	if (IS_DIRSYNC(dir))
+++		handle->h_sync = 1;
+++
+++	dir_block = ext4_bread(handle, dir, 0, 0, &err);
+++	if (!dir_block)
+++		goto out;
+++
+++	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+++	/* the first item must be "." */
+++	assert(de->name_len == 1 && de->name[0] == '.');
+++	len = le16_to_cpu(de->rec_len);
+++	assert(len >= EXT4_DIR_REC_LEN(1));
+++	if (len > EXT4_DIR_REC_LEN(1)) {
+++		BUFFER_TRACE(dir_block, "get_write_access");
+++		err = ext4_journal_get_write_access(handle, dir_block);
+++		if (err)
+++			goto out_journal;
+++
+++		journal = 1;
+++		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
+++	}
+++
+++	len -= EXT4_DIR_REC_LEN(1);
+++	assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
+++	de = (struct ext4_dir_entry_2 *)
+++			((char *) de + le16_to_cpu(de->rec_len));
+++	if (!journal) {
+++		BUFFER_TRACE(dir_block, "get_write_access");
+++		err = ext4_journal_get_write_access(handle, dir_block);
+++		if (err)
+++			goto out_journal;
+++	}
+++
+++	de->inode = cpu_to_le32(inode->i_ino);
+++	if (len > 0)
+++		de->rec_len = cpu_to_le16(len);
+++	else
+++		assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
+++	de->name_len = 2;
+++	strcpy (de->name, "..");
+++	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+++
+++out_journal:
+++	if (journal) {
+++		BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
+++		err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+++		ext4_mark_inode_dirty(handle, dir);
+++	}
+++	brelse (dir_block);
+++
+++out:
+++	return err;
+++}
+++
++ /*
++  *	ext4_add_entry()
++  *
++@@ -1553,6 +1619,9 @@
++ 	if (!dentry->d_name.len)
++ 		return -EINVAL;
++ 	if (is_dx(dir)) {
+++		if (dentry->d_name.len == 2 &&
+++		    memcmp(dentry->d_name.name, "..", 2) == 0)
+++			return ext4_update_dotdot(handle, dentry, inode);
++ 		retval = ext4_dx_add_entry(handle, dentry, inode);
++ 		if (!retval || (retval != ERR_BAD_DX_DIR))
++ 			return retval;
+diff --git a/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..3d13724
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch
+@@ -0,0 +1,63 @@
++Index: linux-source-2.6.32/fs/ext4/inode.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:09:14.201666378 +0200
+++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:09:34.579776667 +0200
++@@ -4985,11 +4985,11 @@
++ 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
++ 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
++ 
++-	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
+++	ei->i_fs_version = le32_to_cpu(raw_inode->i_disk_version);
++ 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
++ 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
++-			inode->i_version |=
++-			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
+++			ei->i_fs_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi))
+++									 << 32;
++ 	}
++ 
++ 	ret = 0;
++@@ -5199,11 +5199,11 @@
++ 		for (block = 0; block < EXT4_N_BLOCKS; block++)
++ 			raw_inode->i_block[block] = ei->i_data[block];
++ 
++-	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
+++	raw_inode->i_disk_version = cpu_to_le32(ei->i_fs_version);
++ 	if (ei->i_extra_isize) {
++ 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
++-			raw_inode->i_version_hi =
++-			cpu_to_le32(inode->i_version >> 32);
+++			raw_inode->i_version_hi = cpu_to_le32(ei->i_fs_version
+++							      >> 32);
++ 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
++ 	}
++ 
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:23.393677834 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:34.581668116 +0200
++@@ -1011,6 +1011,7 @@
++ 	ei->i_dtime = 0;
++ 	ei->i_block_group = group;
++ 	ei->i_last_alloc_group = ~0;
+++	ei->i_fs_version = 0;
++ 
++ 	ext4_set_inode_flags(inode);
++ 	if (IS_DIRSYNC(inode))
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:30.485668675 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:34.581668116 +0200
++@@ -799,8 +799,12 @@
++ 	 */
++ 	tid_t i_sync_tid;
++ 	tid_t i_datasync_tid;
+++
+++	__u64 i_fs_version;
++ };
++ 
+++#define HAVE_DISK_INODE_VERSION
+++
++ /*
++  * File system states
++  */
+diff --git a/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..ee6ff80
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch
+@@ -0,0 +1,470 @@
++Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:54.877664607 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:11:44.185852824 +0200
++@@ -106,6 +106,80 @@
++ #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
++ #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
++ 
+++/**
+++ *   struct ext4_journal_cb_entry - Base structure for callback information.
+++ *
+++ *   This struct is a 'seed' structure for use with your own callback
+++ *   structs. If you are using callbacks you must allocate one of these
+++ *   or another struct of your own definition which has this struct
+++ *   as its first element and pass it to ext4_journal_callback_add().
+++ */
+++struct ext4_journal_cb_entry {
+++	/* list information for other callbacks attached to the same handle */
+++	struct list_head jce_list;
+++
+++	/*  Function to call with this callback structure */
+++	void (*jce_func)(struct super_block *sb,
+++			 struct ext4_journal_cb_entry *jce, int error);
+++
+++	/* user data goes here */
+++};
+++
+++/**
+++ * ext4_journal_callback_add: add a function to call after transaction commit
+++ * @handle: active journal transaction handle to register callback on
+++ * @func: callback function to call after the transaction has committed:
+++ *        @sb: superblock of current filesystem for transaction
+++ *        @jce: returned journal callback data
+++ *        @rc: journal state at commit (0 = transaction committed properly)
+++ * @jce: journal callback data (internal and function private data struct)
+++ *
+++ * The registered function will be called in the context of the journal thread
+++ * after the transaction for which the handle was created has completed.
+++ *
+++ * No locks are held when the callback function is called, so it is safe to
+++ * call blocking functions from within the callback, but the callback should
+++ * not block or run for too long, or the filesystem will be blocked waiting for
+++ * the next transaction to commit. No journaling functions can be used, or
+++ * there is a risk of deadlock.
+++ *
+++ * There is no guaranteed calling order of multiple registered callbacks on
+++ * the same transaction.
+++ */
+++static inline void ext4_journal_callback_add(handle_t *handle,
+++			void (*func)(struct super_block *sb,
+++				     struct ext4_journal_cb_entry *jce,
+++				     int rc),
+++			struct ext4_journal_cb_entry *jce)
+++{
+++	struct ext4_sb_info *sbi =
+++			EXT4_SB(handle->h_transaction->t_journal->j_private);
+++
+++	/* Add the jce to transaction's private list */
+++	jce->jce_func = func;
+++	spin_lock(&sbi->s_md_lock);
+++	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
+++	spin_unlock(&sbi->s_md_lock);
+++}
+++
+++/**
+++ * ext4_journal_callback_del: delete a registered callback
+++ * @handle: active journal transaction handle on which callback was registered
+++ * @jce: registered journal callback entry to unregister
+++ */
+++static inline void ext4_journal_callback_del(handle_t *handle,
+++					     struct ext4_journal_cb_entry *jce)
+++{
+++	struct ext4_sb_info *sbi =
+++			EXT4_SB(handle->h_transaction->t_journal->j_private);
+++
+++	spin_lock(&sbi->s_md_lock);
+++	list_del_init(&jce->jce_list);
+++	spin_unlock(&sbi->s_md_lock);
+++}
+++
+++#define HAVE_EXT4_JOURNAL_CALLBACK_ADD
+++
++ int
++ ext4_mark_iloc_dirty(handle_t *handle,
++ 		     struct inode *inode,
++Index: linux-source-2.6.32/fs/ext4/mballoc.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:11:12.525665254 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:11:44.185852824 +0200
++@@ -96,23 +96,24 @@
++  */
++ #define MB_DEFAULT_GROUP_PREALLOC	512
++ 
++-
++ struct ext4_free_data {
++-	/* this links the free block information from group_info */
++-	struct rb_node node;
+++	/* MUST be the first member */
+++	struct ext4_journal_cb_entry	efd_jce;
++ 
++-	/* this links the free block information from ext4_sb_info */
++-	struct list_head list;
+++	/* ext4_free_data private data starts from here */
+++
+++	/* this links the free block information from group_info */
+++	struct rb_node		efd_node;
++ 
++ 	/* group which free block extent belongs */
++-	ext4_group_t group;
+++	ext4_group_t		efd_group;
++ 
++ 	/* free block extent */
++-	ext4_grpblk_t start_blk;
++-	ext4_grpblk_t count;
+++	ext4_grpblk_t		efd_start_blk;
+++	ext4_grpblk_t		efd_count;
++ 
++ 	/* transaction which freed this extent */
++-	tid_t	t_tid;
+++	tid_t			efd_tid;
++ };
++ 
++ struct ext4_prealloc_space {
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:38.101746741 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:44.193730804 +0200
++@@ -21,6 +21,7 @@
++  * mballoc.c contains the multiblocks allocation routines
++  */
++ 
+++#include "ext4_jbd2.h"
++ #include "mballoc.h"
++ #include <linux/debugfs.h>
++ #include <trace/events/ext4.h>
++@@ -336,12 +337,12 @@
++  */
++ static struct kmem_cache *ext4_pspace_cachep;
++ static struct kmem_cache *ext4_ac_cachep;
++-static struct kmem_cache *ext4_free_ext_cachep;
+++static struct kmem_cache *ext4_free_data_cachep;
++ static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++ 					ext4_group_t group);
++ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
++ 						ext4_group_t group);
++-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
+++static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error);
++ 
++ static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
++ {
++@@ -2652,8 +2653,6 @@
++ 		}
++ 	}
++ 
++-	if (sbi->s_journal)
++-		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
++ 	return 0;
++ }
++ 
++@@ -2745,62 +2744,52 @@
++  * This function is called by the jbd2 layer once the commit has finished,
++  * so we know we can free the blocks that were released with that commit.
++  */
++-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
+++static void ext4_free_data_callback(struct super_block *sb,
+++				   struct ext4_journal_cb_entry *jce,
+++				   int rc)
++ {
++-	struct super_block *sb = journal->j_private;
+++	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
++ 	struct ext4_buddy e4b;
++ 	struct ext4_group_info *db;
++ 	int err, count = 0, count2 = 0;
++-	struct ext4_free_data *entry;
++-	struct list_head *l, *ltmp;
++-
++-	list_for_each_safe(l, ltmp, &txn->t_private_list) {
++-		entry = list_entry(l, struct ext4_free_data, list);
++ 
++-		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
++-			 entry->count, entry->group, entry);
+++	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
+++		entry->efd_count, entry->efd_group, entry);
++ 
++-		if (test_opt(sb, DISCARD)) {
++-			int ret;
++-			ext4_fsblk_t discard_block;
++-
++-			discard_block = entry->start_blk +
++-				ext4_group_first_block_no(sb, entry->group);
++-			trace_ext4_discard_blocks(sb,
++-					(unsigned long long)discard_block,
++-					entry->count);
++-			ret = sb_issue_discard(sb, discard_block, entry->count);
++-			if (ret == EOPNOTSUPP) {
++-				ext4_warning(sb, __func__,
++-					"discard not supported, disabling");
++-				clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
++-			}
+++	if (test_opt(sb, DISCARD)) {
+++		int ret;
+++		ret = ext4_issue_discard(sb, entry->efd_group,
+++			entry->efd_start_blk, entry->efd_count);
+++		if (unlikely(ret == -EOPNOTSUPP)) {
+++			ext4_warning(sb, "discard not supported, "
+++					"disabling");
+++			clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
++ 		}
+++	}
++ 
++-		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
++-		/* we expect to find existing buddy because it's pinned */
++-		BUG_ON(err != 0);
++-
++-		db = e4b.bd_info;
++-		/* there are blocks to put in buddy to make them really free */
++-		count += entry->count;
++-		count2++;
++-		ext4_lock_group(sb, entry->group);
++-		/* Take it out of per group rb tree */
++-		rb_erase(&entry->node, &(db->bb_free_root));
++-		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
++-
++-		if (!db->bb_free_root.rb_node) {
++-			/* No more items in the per group rb tree
++-			 * balance refcounts from ext4_mb_free_metadata()
++-			 */
++-			page_cache_release(e4b.bd_buddy_page);
++-			page_cache_release(e4b.bd_bitmap_page);
++-		}
++-		ext4_unlock_group(sb, entry->group);
++-		kmem_cache_free(ext4_free_ext_cachep, entry);
++-		ext4_mb_unload_buddy(&e4b);
+++	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
+++	/* we expect to find existing buddy because it's pinned */
+++	BUG_ON(err != 0);
+++
+++	db = e4b.bd_info;
+++	/* there are blocks to put in buddy to make them really free */
+++	count += entry->efd_count;
+++	count2++;
+++	ext4_lock_group(sb, entry->efd_group);
+++	/* Take it out of per group rb tree */
+++	rb_erase(&entry->efd_node, &(db->bb_free_root));
+++	mb_free_blocks(NULL, &e4b, entry->efd_start_blk, entry->efd_count);
+++
+++	if (!db->bb_free_root.rb_node) {
+++		/* No more items in the per group rb tree
+++		 * balance refcounts from ext4_mb_free_metadata()
+++		 */
+++		page_cache_release(e4b.bd_buddy_page);
+++		page_cache_release(e4b.bd_bitmap_page);
++ 	}
+++	ext4_unlock_group(sb, entry->efd_group);
+++	kmem_cache_free(ext4_free_data_cachep, entry);
+++	ext4_mb_release_desc(&e4b);
++ 
++ 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
++ }
++@@ -2852,22 +2841,22 @@
++ 		kmem_cache_create("ext4_alloc_context",
++ 				     sizeof(struct ext4_allocation_context),
++ 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
++-	if (ext4_ac_cachep == NULL) {
++-		kmem_cache_destroy(ext4_pspace_cachep);
++-		return -ENOMEM;
++-	}
+++	if (ext4_ac_cachep == NULL)
+++		goto out_err;
+++
+++	ext4_free_data_cachep =
+++		KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT);
+++	if (ext4_free_data_cachep == NULL)
+++		goto out1_err;
++ 
++-	ext4_free_ext_cachep =
++-		kmem_cache_create("ext4_free_block_extents",
++-				     sizeof(struct ext4_free_data),
++-				     0, SLAB_RECLAIM_ACCOUNT, NULL);
++-	if (ext4_free_ext_cachep == NULL) {
++-		kmem_cache_destroy(ext4_pspace_cachep);
++-		kmem_cache_destroy(ext4_ac_cachep);
++-		return -ENOMEM;
++-	}
++ 	ext4_create_debugfs_entry();
++ 	return 0;
+++
+++out1_err:
+++	kmem_cache_destroy(ext4_ac_cachep);
+++out_err:
+++	kmem_cache_destroy(ext4_pspace_cachep);
+++	return -ENOMEM;
++ }
++ 
++ void exit_ext4_mballoc(void)
++@@ -2879,7 +2868,7 @@
++ 	rcu_barrier();
++ 	kmem_cache_destroy(ext4_pspace_cachep);
++ 	kmem_cache_destroy(ext4_ac_cachep);
++-	kmem_cache_destroy(ext4_free_ext_cachep);
+++	kmem_cache_destroy(ext4_free_data_cachep);
++ 	ext4_remove_debugfs_entry();
++ }
++ 
++@@ -3421,8 +3410,8 @@
++ 	n = rb_first(&(grp->bb_free_root));
++ 
++ 	while (n) {
++-		entry = rb_entry(n, struct ext4_free_data, node);
++-		mb_set_bits(bitmap, entry->start_blk, entry->count);
+++		entry = rb_entry(n, struct ext4_free_data, efd_node);
+++		mb_set_bits(bitmap, entry->efd_start_blk, entry->efd_count);
++ 		n = rb_next(n);
++ 	}
++ 	return;
++@@ -4666,11 +4655,11 @@
++  * AND the blocks are associated with the same group.
++  */
++ static int can_merge(struct ext4_free_data *entry1,
++-			struct ext4_free_data *entry2)
+++		     struct ext4_free_data *entry2)
++ {
++-	if ((entry1->t_tid == entry2->t_tid) &&
++-	    (entry1->group == entry2->group) &&
++-	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
+++	if ((entry1->efd_tid == entry2->efd_tid) &&
+++	    (entry1->efd_group == entry2->efd_group) &&
+++	    ((entry1->efd_start_blk + entry1->efd_count) == entry2->efd_start_blk))
++ 		return 1;
++ 	return 0;
++ }
++@@ -4683,7 +4672,6 @@
++ 	struct ext4_free_data *entry;
++ 	struct ext4_group_info *db = e4b->bd_info;
++ 	struct super_block *sb = e4b->bd_sb;
++-	struct ext4_sb_info *sbi = EXT4_SB(sb);
++ 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
++ 	struct rb_node *parent = NULL, *new_node;
++ 
++@@ -4691,8 +4679,8 @@
++ 	BUG_ON(e4b->bd_bitmap_page == NULL);
++ 	BUG_ON(e4b->bd_buddy_page == NULL);
++ 
++-	new_node = &new_entry->node;
++-	block = new_entry->start_blk;
+++	new_node = &new_entry->efd_node;
+++	block = new_entry->efd_start_blk;
++ 
++ 	if (!*n) {
++ 		/* first free block extent. We need to
++@@ -4705,15 +4693,15 @@
++ 	}
++ 	while (*n) {
++ 		parent = *n;
++-		entry = rb_entry(parent, struct ext4_free_data, node);
++-		if (block < entry->start_blk)
+++		entry = rb_entry(parent, struct ext4_free_data, efd_node);
+++		if (block < entry->efd_start_blk)
++ 			n = &(*n)->rb_left;
++-		else if (block >= (entry->start_blk + entry->count))
+++		else if (block >= (entry->efd_start_blk + entry->efd_count))
++ 			n = &(*n)->rb_right;
++ 		else {
++ 			ext4_grp_locked_error(sb, e4b->bd_group, __func__,
++ 					"Double free of blocks %d (%d %d)",
++-					block, entry->start_blk, entry->count);
+++					block, entry->efd_start_blk, entry->efd_count);
++ 			return 0;
++ 		}
++ 	}
++@@ -4724,34 +4712,29 @@
++ 	/* Now try to see the extent can be merged to left and right */
++ 	node = rb_prev(new_node);
++ 	if (node) {
++-		entry = rb_entry(node, struct ext4_free_data, node);
+++		entry = rb_entry(node, struct ext4_free_data, efd_node);
++ 		if (can_merge(entry, new_entry)) {
++-			new_entry->start_blk = entry->start_blk;
++-			new_entry->count += entry->count;
+++			new_entry->efd_start_blk = entry->efd_start_blk;
+++			new_entry->efd_count += entry->efd_count;
++ 			rb_erase(node, &(db->bb_free_root));
++-			spin_lock(&sbi->s_md_lock);
++-			list_del(&entry->list);
++-			spin_unlock(&sbi->s_md_lock);
++-			kmem_cache_free(ext4_free_ext_cachep, entry);
+++			ext4_journal_callback_del(handle, &entry->efd_jce);
+++			kmem_cache_free(ext4_free_data_cachep, entry);
++ 		}
++ 	}
++ 
++ 	node = rb_next(new_node);
++ 	if (node) {
++-		entry = rb_entry(node, struct ext4_free_data, node);
+++		entry = rb_entry(node, struct ext4_free_data, efd_node);
++ 		if (can_merge(new_entry, entry)) {
++-			new_entry->count += entry->count;
+++			new_entry->efd_count += entry->efd_count;
++ 			rb_erase(node, &(db->bb_free_root));
++-			spin_lock(&sbi->s_md_lock);
++-			list_del(&entry->list);
++-			spin_unlock(&sbi->s_md_lock);
++-			kmem_cache_free(ext4_free_ext_cachep, entry);
+++			ext4_journal_callback_del(handle, &entry->efd_jce);
+++			kmem_cache_free(ext4_free_data_cachep, entry);
++ 		}
++ 	}
++ 	/* Add the extent to transaction's private list */
++-	spin_lock(&sbi->s_md_lock);
++-	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
++-	spin_unlock(&sbi->s_md_lock);
+++	ext4_journal_callback_add(handle, ext4_free_data_callback,
+++				  &new_entry->efd_jce);
++ 	return 0;
++ }
++ 
++@@ -4872,11 +4855,11 @@
++ 		 * blocks being freed are metadata. these blocks shouldn't
++ 		 * be used until this transaction is committed
++ 		 */
++-		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
++-		new_entry->start_blk = bit;
++-		new_entry->group  = block_group;
++-		new_entry->count = count;
++-		new_entry->t_tid = handle->h_transaction->t_tid;
+++		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
+++		new_entry->efd_start_blk = bit;
+++		new_entry->efd_group  = block_group;
+++		new_entry->efd_count = count;
+++		new_entry->efd_tid = handle->h_transaction->t_tid;
++ 
++ 		ext4_lock_group(sb, block_group);
++ 		mb_clear_bits(bitmap_bh->b_data, bit, count);
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:38.097666318 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:44.197733192 +0200
++@@ -301,6 +301,23 @@
++ 
++ EXPORT_SYMBOL(ext4_journal_abort_handle);
++ 
+++static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
+++{
+++	struct super_block		*sb = journal->j_private;
+++	struct ext4_sb_info		*sbi = EXT4_SB(sb);
+++	int				error = is_journal_aborted(journal);
+++	struct ext4_journal_cb_entry	*jce, *tmp;
+++
+++	spin_lock(&sbi->s_md_lock);
+++	list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
+++		list_del_init(&jce->jce_list);
+++		spin_unlock(&sbi->s_md_lock);
+++		jce->jce_func(sb, jce, error);
+++		spin_lock(&sbi->s_md_lock);
+++	}
+++	spin_unlock(&sbi->s_md_lock);
+++}
+++
++ /* Deal with the reporting of failure conditions on a filesystem such as
++  * inconsistencies detected or read IO failures.
++  *
++@@ -2979,6 +2996,8 @@
++ 	}
++ 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
++ 
+++	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
+++
++ no_journal:
++ 	err = percpu_counter_init(&sbi->s_freeblocks_counter,
++ 				  ext4_count_free_blocks(sb));
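
Usage sketch (not part of the patchset): a client of the callback API added
above embeds struct ext4_journal_cb_entry as the *first* member of its own
tracking struct, registers it on an active handle, and is called back from the
journal thread after commit. The my_* names below are illustrative only;
ext4_free_data in mballoc.c is the real in-tree user.

    #include <linux/slab.h>
    #include "ext4_jbd2.h"

    struct my_commit_data {
            struct ext4_journal_cb_entry mcd_jce;   /* MUST be first */
            int                          mcd_cookie;
    };

    /* Runs in the journal thread once the transaction commits;
     * error != 0 means the journal was aborted. */
    static void my_commit_cb(struct super_block *sb,
                             struct ext4_journal_cb_entry *jce, int error)
    {
            struct my_commit_data *mcd = (struct my_commit_data *)jce;

            pr_info("cookie %d committed, rc %d\n", mcd->mcd_cookie, error);
            kfree(mcd);
    }

    static int my_register(handle_t *handle)
    {
            struct my_commit_data *mcd;

            mcd = kmalloc(sizeof(*mcd), GFP_NOFS);
            if (mcd == NULL)
                    return -ENOMEM;
            mcd->mcd_cookie = 42;
            ext4_journal_callback_add(handle, my_commit_cb, &mcd->mcd_jce);
            return 0;
    }

Per the comment in ext4_jbd2.h above, the callback must not start journal
operations of its own, or commit could deadlock.
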
+diff --git a/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..fa4aa0d
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch
+@@ -0,0 +1,237 @@
++Removes the static definition of the dx_root struct so that the "." and ".."
++dirents can have extra data. This patch does not change any functionality but
++is required for the ext4_data_in_dirent patch.
++ 
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:49.061664012 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:52.005665948 +0200
++@@ -114,22 +114,13 @@
++  * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
++  */
++ 
++-struct dx_root
+++struct dx_root_info
++ {
++-	struct fake_dirent dot;
++-	char dot_name[4];
++-	struct fake_dirent dotdot;
++-	char dotdot_name[4];
++-	struct dx_root_info
++-	{
++-		__le32 reserved_zero;
++-		u8 hash_version;
++-		u8 info_length; /* 8 */
++-		u8 indirect_levels;
++-		u8 unused_flags;
++-	}
++-	info;
++-	struct dx_entry	entries[0];
+++	__le32 reserved_zero;
+++	u8 hash_version;
+++	u8 info_length; /* 8 */
+++	u8 indirect_levels;
+++	u8 unused_flags;
++ };
++ 
++ struct dx_node
++@@ -243,6 +234,16 @@
++  * Future: use high four bits of block for coalesce-on-delete flags
++  * Mask them off for now.
++  */
+++struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
+++{
+++	/* skip the "." entry to reach ".." */
+++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
+++
+++	/* the dx root info follows the ".." entry */
+++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
+++
+++	return (struct dx_root_info *)de;
+++}
++ 
++ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
++ {
++@@ -397,7 +398,7 @@
++ {
++ 	unsigned count, indirect;
++ 	struct dx_entry *at, *entries, *p, *q, *m;
++-	struct dx_root *root;
+++	struct dx_root_info * info;
++ 	struct buffer_head *bh;
++ 	struct dx_frame *frame = frame_in;
++ 	u32 hash;
++@@ -405,18 +406,18 @@
++ 	frame->bh = NULL;
++ 	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
++ 		goto fail;
++-	root = (struct dx_root *) bh->b_data;
++-	if (root->info.hash_version != DX_HASH_TEA &&
++-	    root->info.hash_version != DX_HASH_HALF_MD4 &&
++-	    root->info.hash_version != DX_HASH_LEGACY) {
+++	info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
+++	if (info->hash_version != DX_HASH_TEA &&
+++	    info->hash_version != DX_HASH_HALF_MD4 &&
+++	    info->hash_version != DX_HASH_LEGACY) {
++ 		ext4_warning(dir->i_sb, __func__,
++ 			     "Unrecognised inode hash code %d for directory "
++-                            "#%lu", root->info.hash_version, dir->i_ino);
+++			     "#%lu", info->hash_version, dir->i_ino);
++ 		brelse(bh);
++ 		*err = ERR_BAD_DX_DIR;
++ 		goto fail;
++ 	}
++-	hinfo->hash_version = root->info.hash_version;
+++	hinfo->hash_version = info->hash_version;
++ 	if (hinfo->hash_version <= DX_HASH_TEA)
++ 		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
++ 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
++@@ -424,29 +425,28 @@
++ 		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
++ 	hash = hinfo->hash;
++ 
++-	if (root->info.unused_flags & 1) {
+++	if (info->unused_flags & 1) {
++ 		ext4_warning(dir->i_sb, __func__,
++ 			     "Unimplemented inode hash flags: %#06x",
++-			     root->info.unused_flags);
+++			     info->unused_flags);
++ 		brelse(bh);
++ 		*err = ERR_BAD_DX_DIR;
++ 		goto fail;
++ 	}
++ 
++-	if ((indirect = root->info.indirect_levels) > 1) {
+++	if ((indirect = info->indirect_levels) > 1) {
++ 		ext4_warning(dir->i_sb, __func__,
++ 			     "Unimplemented inode hash depth: %#06x",
++-			     root->info.indirect_levels);
+++			     info->indirect_levels);
++ 		brelse(bh);
++ 		*err = ERR_BAD_DX_DIR;
++ 		goto fail;
++ 	}
++ 
++-	entries = (struct dx_entry *) (((char *)&root->info) +
++-				       root->info.info_length);
+++	entries = (struct dx_entry *) (((char *)info) + info->info_length);
++ 
++ 	if (dx_get_limit(entries) != dx_root_limit(dir,
++-						   root->info.info_length)) {
+++						   info->info_length)) {
++ 		ext4_warning(dir->i_sb, __func__,
++ 			     "dx entry: limit != root limit");
++ 		brelse(bh);
++@@ -528,10 +528,12 @@
++ 
++ static void dx_release (struct dx_frame *frames)
++ {
+++	struct dx_root_info *info;
++ 	if (frames[0].bh == NULL)
++ 		return;
++ 
++-	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
+++	info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
+++	if (info->indirect_levels)
++ 		brelse(frames[1].bh);
++ 	brelse(frames[0].bh);
++ }
++@@ -1442,17 +1444,16 @@
++ 	const char	*name = dentry->d_name.name;
++ 	int		namelen = dentry->d_name.len;
++ 	struct buffer_head *bh2;
++-	struct dx_root	*root;
++ 	struct dx_frame	frames[2], *frame;
++ 	struct dx_entry *entries;
++-	struct ext4_dir_entry_2	*de, *de2;
+++	struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
++ 	char		*data1, *top;
++ 	unsigned	len;
++ 	int		retval;
++ 	unsigned	blocksize;
++ 	struct dx_hash_info hinfo;
++ 	ext4_lblk_t  block;
++-	struct fake_dirent *fde;
+++	struct dx_root_info *dx_info;
++ 
++ 	blocksize =  dir->i_sb->s_blocksize;
++ 	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
++@@ -1462,20 +1463,20 @@
++ 		brelse(bh);
++ 		return retval;
++ 	}
++-	root = (struct dx_root *) bh->b_data;
+++	dot_de = (struct ext4_dir_entry_2 *) bh->b_data;
+++	dotdot_de = ext4_next_entry(dot_de, blocksize);
++ 
++ 	/* The 0th block becomes the root, move the dirents out */
++-	fde = &root->dotdot;
++-	de = (struct ext4_dir_entry_2 *)((char *)fde +
++-		ext4_rec_len_from_disk(fde->rec_len, blocksize));
++-	if ((char *) de >= (((char *) root) + blocksize)) {
+++	de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
+++		ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
+++	if ((char *) de >= (((char *) dot_de) + blocksize)) {
++ 		ext4_error(dir->i_sb, __func__,
++ 			   "invalid rec_len for '..' in inode %lu",
++ 			   dir->i_ino);
++ 		brelse(bh);
++ 		return -EIO;
++ 	}
++-	len = ((char *) root) + blocksize - (char *) de;
+++	len = ((char *) dot_de) + blocksize - (char *) de;
++ 
++ 	/* Allocate new block for the 0th block's dirents */
++ 	bh2 = ext4_append(handle, dir, &block, &retval);
++@@ -1494,19 +1495,23 @@
++ 	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
++ 					   blocksize);
++ 	/* Initialize the root; the dot dirents already exist */
++-	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
++-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
++-					   blocksize);
++-	memset (&root->info, 0, sizeof(root->info));
++-	root->info.info_length = sizeof(root->info);
++-	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++-	entries = root->entries;
+++	dotdot_de->rec_len = ext4_rec_len_to_disk(blocksize -
+++			le16_to_cpu(dot_de->rec_len), blocksize);
+++
+++	/* initialize hashing info */
+++	dx_info = dx_get_dx_info(dot_de);
+++	memset (dx_info, 0, sizeof(*dx_info));
+++	dx_info->info_length = sizeof(*dx_info);
+++	dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+++
+++	entries = (void *)dx_info + sizeof(*dx_info);
+++
++ 	dx_set_block(entries, 1);
++ 	dx_set_count(entries, 1);
++-	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
+++	dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
++ 
++ 	/* Initialize as for dx_probe */
++-	hinfo.hash_version = root->info.hash_version;
+++	hinfo.hash_version = dx_info->hash_version;
++ 	if (hinfo.hash_version <= DX_HASH_TEA)
++ 		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
++ 	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
++@@ -1756,6 +1761,7 @@
++ 				goto journal_error;
++ 			brelse (bh2);
++ 		} else {
+++			struct dx_root_info * info;
++ 			dxtrace(printk(KERN_DEBUG
++ 				       "Creating second level index...\n"));
++ 			memcpy((char *) entries2, (char *) entries,
++@@ -1765,7 +1771,9 @@
++ 			/* Set up root */
++ 			dx_set_count(entries, 1);
++ 			dx_set_block(entries + 0, newblock);
++-			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
+++			info = dx_get_dx_info((struct ext4_dir_entry_2*)
+++					frames[0].bh->b_data);
+++			info->indirect_levels = 1;
++ 
++ 			/* Add new access path frame */
++ 			frame = frames + 1;
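
For reference, the open-coded offsets in dx_get_dx_info() land exactly where
the removed dx_root struct kept its info member. A stand-alone sketch of the
arithmetic, assuming the classic dirent layout and EXT4_DIR_REC_LEN() as
defined in ext4.h:

    #include <stdio.h>

    /* EXT4_DIR_REC_LEN(name_len) rounds 8 + name_len up to a multiple of 4 */
    #define EXT4_DIR_REC_LEN(len) (((len) + 8 + 3) & ~3)

    int main(void)
    {
            /* old struct dx_root: fake_dirent(8) + dot_name[4]
             *                   + fake_dirent(8) + dotdot_name[4] = 24 */
            int old_offset = 8 + 4 + 8 + 4;

            /* new dx_get_dx_info(): skip "." (name len 1), then ".." (2) */
            int new_offset = EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2);

            printf("old %d, new %d\n", old_offset, new_offset); /* 24, 24 */
            return 0;
    }

Dropping the fixed struct is what lets later dirent-data patches make the "."
and ".." records longer than these minimal sizes.
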
+diff --git a/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..443a153
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch
+@@ -0,0 +1,736 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:16.361665139 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:20.317665268 +0200
++@@ -1258,6 +1258,7 @@
++ #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
++ #define EXT4_FEATURE_INCOMPAT_MMP               0x0100
++ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
+++#define EXT4_FEATURE_INCOMPAT_EA_INODE		0x0400
++ #define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
++ 
++ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
++@@ -1267,6 +1268,7 @@
++ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
++ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
++ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+++					 EXT4_FEATURE_INCOMPAT_EA_INODE| \
++ 					 EXT4_FEATURE_INCOMPAT_MMP| \
++ 					 EXT4_FEATURE_INCOMPAT_DIRDATA)
++ 
++@@ -1565,6 +1567,12 @@
++ #endif
++ 
++ /*
+++ * Maximum size of xattr attributes for FEATURE_INCOMPAT_EA_INODE: 1 MiB.
+++ * This limit is arbitrary, but is reasonable for the xattr API.
+++ */
+++#define EXT4_XATTR_MAX_LARGE_EA_SIZE    (1024 * 1024)
+++
+++/*
++  * Function prototypes
++  */
++ 
++Index: linux-source-2.6.32/fs/ext4/xattr.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:10:12.481658638 +0200
+++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:20.321664768 +0200
++@@ -168,19 +168,26 @@
++ }
++ 
++ static inline int
++-ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
+++ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
+++		       struct inode *inode)
++ {
++ 	size_t value_size = le32_to_cpu(entry->e_value_size);
++ 
++-	if (entry->e_value_block != 0 || value_size > size ||
++-	    le16_to_cpu(entry->e_value_offs) + value_size > size)
+++	if ((entry->e_value_inum == 0) &&
+++	   (le16_to_cpu(entry->e_value_offs) + value_size > size))
+++		return -EIO;
+++	if (entry->e_value_inum != 0 &&
+++	    (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
+++	     le32_to_cpu(entry->e_value_inum) >
+++	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
++ 		return -EIO;
++ 	return 0;
++ }
++ 
++ static int
++ ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
++-		      const char *name, size_t size, int sorted)
+++		      const char *name, size_t size, int sorted,
+++		      struct inode *inode)
++ {
++ 	struct ext4_xattr_entry *entry;
++ 	size_t name_len;
++@@ -200,11 +207,103 @@
++ 			break;
++ 	}
++ 	*pentry = entry;
++-	if (!cmp && ext4_xattr_check_entry(entry, size))
+++	if (!cmp && ext4_xattr_check_entry(entry, size, inode))
++ 			return -EIO;
++ 	return cmp ? -ENODATA : 0;
++ }
++ 
+++/*
+++ * Read the EA value from an inode.
+++ */
+++static int
+++ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
+++{
+++	unsigned long block = 0;
+++	struct buffer_head *bh = NULL;
+++	int err, blocksize;
+++	size_t csize, ret_size = 0;
+++
+++	if (*size == 0)
+++		return 0;
+++
+++	blocksize = ea_inode->i_sb->s_blocksize;
+++
+++	while (ret_size < *size) {
+++		csize = (*size - ret_size) > blocksize ? blocksize :
+++							*size - ret_size;
+++		bh = ext4_bread(NULL, ea_inode, block, 0, &err);
+++		if (!bh) {
+++			*size = ret_size;
+++			return err;
+++		}
+++		memcpy(buf, bh->b_data, csize);
+++		brelse(bh);
+++
+++		buf += csize;
+++		block += 1;
+++		ret_size += csize;
+++	}
+++
+++	*size = ret_size;
+++
+++	return err;
+++}
+++
+++struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino, int *err)
+++{
+++	struct inode *ea_inode = NULL;
+++
+++	ea_inode = ext4_iget(parent->i_sb, ea_ino);
+++	if (ea_inode == NULL || is_bad_inode(ea_inode)) {
+++		ext4_error(parent->i_sb, "error while reading EA inode %d",
+++			   ea_ino);
+++		*err = -EIO;
+++		return NULL;
+++	}
+++
+++	if (ea_inode->i_xattr_inode_parent != parent->i_ino ||
+++	    ea_inode->i_generation != parent->i_generation) {
+++		ext4_error(parent->i_sb, "Backpointer from EA inode %d "
+++			   "to parent invalid.", ea_ino);
+++		*err = -EINVAL;
+++		goto error;
+++	}
+++
+++	if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
+++		ext4_error(parent->i_sb, "EA inode %d does not have "
+++			   "EXT4_EA_INODE_FL flag set.\n", ea_ino);
+++		*err = -EINVAL;
+++		goto error;
+++	}
+++
+++	*err = 0;
+++	return ea_inode;
+++
+++error:
+++	iput(ea_inode);
+++	return NULL;
+++}
+++
+++/*
+++ * Read the value from the EA inode.
+++ */
+++static int
+++ext4_xattr_inode_get(struct inode *inode, int ea_ino, void *buffer,
+++		     size_t *size)
+++{
+++	struct inode *ea_inode = NULL;
+++	int err;
+++
+++	ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
+++	if (err)
+++		return err;
+++
+++	err = ext4_xattr_inode_read(ea_inode, buffer, size);
+++	iput(ea_inode);
+++
+++	return err;
+++}
+++
++ static int
++ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
++ 		     void *buffer, size_t buffer_size)
++@@ -235,7 +334,8 @@
++ 	}
++ 	ext4_xattr_cache_insert(bh);
++ 	entry = BFIRST(bh);
++-	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
+++	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
+++				      inode);
++ 	if (error == -EIO)
++ 		goto bad_block;
++ 	if (error)
++@@ -245,8 +345,16 @@
++ 		error = -ERANGE;
++ 		if (size > buffer_size)
++ 			goto cleanup;
++-		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
++-		       size);
+++		if (entry->e_value_inum != 0) {
+++			error = ext4_xattr_inode_get(inode,
+++					     le32_to_cpu(entry->e_value_inum),
+++					     buffer, &size);
+++			if (error)
+++				goto cleanup;
+++		} else {
+++			memcpy(buffer, bh->b_data +
+++			       le16_to_cpu(entry->e_value_offs), size);
+++		}
++ 	}
++ 	error = size;
++ 
++@@ -280,7 +388,7 @@
++ 	if (error)
++ 		goto cleanup;
++ 	error = ext4_xattr_find_entry(&entry, name_index, name,
++-				      end - (void *)entry, 0);
+++				      end - (void *)entry, 0, inode);
++ 	if (error)
++ 		goto cleanup;
++ 	size = le32_to_cpu(entry->e_value_size);
++@@ -288,8 +396,16 @@
++ 		error = -ERANGE;
++ 		if (size > buffer_size)
++ 			goto cleanup;
++-		memcpy(buffer, (void *)IFIRST(header) +
++-		       le16_to_cpu(entry->e_value_offs), size);
+++		if (entry->e_value_inum != 0) {
+++			error = ext4_xattr_inode_get(inode,
+++					     le32_to_cpu(entry->e_value_inum),
+++					     buffer, &size);
+++			if (error)
+++				goto cleanup;
+++		} else {
+++			memcpy(buffer, (void *)IFIRST(header) +
+++			       le16_to_cpu(entry->e_value_offs), size);
+++		}
++ 	}
++ 	error = size;
++ 
++@@ -511,7 +627,7 @@
++ {
++ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
++ 		*total += EXT4_XATTR_LEN(last->e_name_len);
++-		if (!last->e_value_block && last->e_value_size) {
+++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
++ 			size_t offs = le16_to_cpu(last->e_value_offs);
++ 			if (offs < *min_offs)
++ 				*min_offs = offs;
++@@ -520,11 +636,159 @@
++ 	return (*min_offs - ((void *)last - base) - sizeof(__u32));
++ }
++ 
+++/*
+++ * Write the value of the EA in an inode.
+++ */
+++static int
+++ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
+++		       const void *buf, int bufsize)
+++{
+++	struct buffer_head *bh = NULL, dummy;
+++	unsigned long block = 0;
+++	unsigned blocksize = ea_inode->i_sb->s_blocksize;
+++	unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
+++	int csize, wsize = 0;
+++	int ret = 0;
+++	int retries = 0;
+++
+++retry:
+++	while (ret >= 0 && ret < max_blocks) {
+++		block += ret;
+++		max_blocks -= ret;
+++
+++		ret = ext4_get_blocks(handle, ea_inode, block, max_blocks,
+++				      &dummy, EXT4_GET_BLOCKS_CREATE);
+++		if (ret <= 0) {
+++			ext4_mark_inode_dirty(handle, ea_inode);
+++			if (ret == -ENOSPC &&
+++			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
+++				ret = 0;
+++				goto retry;
+++			}
+++			break;
+++		}
+++	}
+++
+++	if (ret < 0)
+++		return ret;
+++
+++	block = 0;
+++	while (wsize < bufsize) {
+++		if (bh != NULL)
+++			brelse(bh);
+++		csize = (bufsize - wsize) > blocksize ? blocksize :
+++								bufsize - wsize;
+++		bh = ext4_getblk(handle, ea_inode, block, 0, &ret);
+++		if (!bh)
+++			goto out;
+++		ret = ext4_journal_get_write_access(handle, bh);
+++		if (ret)
+++			goto out;
+++
+++		memcpy(bh->b_data, buf, csize);
+++		set_buffer_uptodate(bh);
+++		ext4_journal_dirty_metadata(handle, bh);
+++
+++		buf += csize;
+++		wsize += csize;
+++		block += 1;
+++	}
+++
+++	i_size_write(ea_inode, wsize);
+++	ext4_update_i_disksize(ea_inode, wsize);
+++
+++	ext4_mark_inode_dirty(handle, ea_inode);
+++
+++out:
+++	brelse(bh);
+++
+++	return ret;
+++}
+++
+++/*
+++ * Create an inode to store the value of a large EA.
+++ */
+++static struct inode *
+++ext4_xattr_inode_create(handle_t *handle, struct inode *inode)
+++{
+++	struct inode *ea_inode = NULL;
+++
+++	/*
+++	 * Let the next inode be the goal, so we try to allocate the EA inode
+++	 * in the same group, or a nearby one.
+++	 */
+++	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
+++				  S_IFREG|0600, NULL, inode->i_ino + 1);
+++
+++	if (!IS_ERR(ea_inode)) {
+++		ea_inode->i_op = &ext4_file_inode_operations;
+++		ea_inode->i_fop = &ext4_file_operations;
+++		ext4_set_aops(ea_inode);
+++		ea_inode->i_generation = inode->i_generation;
+++		EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
+++
+++		/*
+++		 * A back-pointer from EA inode to parent inode will be useful
+++		 * for e2fsck.
+++		 */
+++		ea_inode->i_xattr_inode_parent = inode->i_ino;
+++		unlock_new_inode(ea_inode);
+++	}
+++
+++	return ea_inode;
+++}
+++
+++/*
+++ * Unlink the inode storing the value of the EA.
+++ */
+++static int
+++ext4_xattr_inode_unlink(struct inode *inode, int ea_ino)
+++{
+++	struct inode *ea_inode = NULL;
+++	int err;
+++
+++	ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
+++	if (err)
+++		return err;
+++
+++	ea_inode->i_nlink = 0;
+++	iput(ea_inode);
+++
+++	return 0;
+++}
+++
+++/*
+++ * Add value of the EA in an inode.
+++ */
+++static int
+++ext4_xattr_inode_set(handle_t *handle, struct inode *inode, int *ea_ino,
+++		     const void *value, size_t value_len)
+++{
+++	struct inode *ea_inode = NULL;
+++	int err;
+++
+++	/* Create an inode for the EA value */
+++	ea_inode = ext4_xattr_inode_create(handle, inode);
+++	if (IS_ERR(ea_inode))
+++		return -1;
+++
+++	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
+++	if (err)
+++		ea_inode->i_nlink = 0;
+++	else
+++		*ea_ino = ea_inode->i_ino;
+++
+++	iput(ea_inode);
+++
+++	return err;
+++}
+++
++ struct ext4_xattr_info {
++-	int name_index;
++ 	const char *name;
++ 	const void *value;
++ 	size_t value_len;
+++	int name_index;
+++	int in_inode;
++ };
++ 
++ struct ext4_xattr_search {
++@@ -536,15 +800,23 @@
++ };
++ 
++ static int
++-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+++		     handle_t *handle, struct inode *inode)
++ {
++ 	struct ext4_xattr_entry *last;
++ 	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
+++	int in_inode = i->in_inode;
+++
+++	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+++		 EXT4_FEATURE_INCOMPAT_EA_INODE) &&
+++	    (EXT4_XATTR_SIZE(i->value_len) >
+++	     EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
+++		in_inode = 1;
++ 
++ 	/* Compute min_offs and last. */
++ 	last = s->first;
++ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
++-		if (!last->e_value_block && last->e_value_size) {
+++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
++ 			size_t offs = le16_to_cpu(last->e_value_offs);
++ 			if (offs < min_offs)
++ 				min_offs = offs;
++@@ -552,16 +824,21 @@
++ 	}
++ 	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
++ 	if (!s->not_found) {
++-		if (!s->here->e_value_block && s->here->e_value_size) {
+++		if (!in_inode && s->here->e_value_inum == 0 &&
+++		    s->here->e_value_size > 0) {
++ 			size_t size = le32_to_cpu(s->here->e_value_size);
++ 			free += EXT4_XATTR_SIZE(size);
++ 		}
++ 		free += EXT4_XATTR_LEN(name_len);
++ 	}
++ 	if (i->value) {
++-		if (free < EXT4_XATTR_SIZE(i->value_len) ||
++-		    free < EXT4_XATTR_LEN(name_len) +
++-			   EXT4_XATTR_SIZE(i->value_len))
+++		size_t value_len = EXT4_XATTR_SIZE(i->value_len);
+++
+++		if (in_inode)
+++			value_len = 0;
+++
+++		if (free < value_len ||
+++		    free < EXT4_XATTR_LEN(name_len) + value_len)
++ 			return -ENOSPC;
++ 	}
++ 
++@@ -575,7 +852,8 @@
++ 		s->here->e_name_len = name_len;
++ 		memcpy(s->here->e_name, i->name, name_len);
++ 	} else {
++-		if (!s->here->e_value_block && s->here->e_value_size) {
+++		if (s->here->e_value_offs > 0 && s->here->e_value_inum == 0 &&
+++		    s->here->e_value_size > 0) {
++ 			void *first_val = s->base + min_offs;
++ 			size_t offs = le16_to_cpu(s->here->e_value_offs);
++ 			void *val = s->base + offs;
++@@ -604,13 +882,17 @@
++ 			last = s->first;
++ 			while (!IS_LAST_ENTRY(last)) {
++ 				size_t o = le16_to_cpu(last->e_value_offs);
++-				if (!last->e_value_block &&
++-				    last->e_value_size && o < offs)
+++				if (last->e_value_size > 0 && o < offs)
++ 					last->e_value_offs =
++ 						cpu_to_le16(o + size);
++ 				last = EXT4_XATTR_NEXT(last);
++ 			}
++ 		}
+++		if (s->here->e_value_inum != 0) {
+++			ext4_xattr_inode_unlink(inode,
+++					le32_to_cpu(s->here->e_value_inum));
+++			s->here->e_value_inum = 0;
+++		}
++ 		if (!i->value) {
++ 			/* Remove the old name. */
++ 			size_t size = EXT4_XATTR_LEN(name_len);
++@@ -624,10 +906,17 @@
++ 	if (i->value) {
++ 		/* Insert the new value. */
++ 		s->here->e_value_size = cpu_to_le32(i->value_len);
++-		if (i->value_len) {
+++		if (in_inode) {
+++			int ea_ino = le32_to_cpu(s->here->e_value_inum);
+++			ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
+++					     i->value_len);
+++			s->here->e_value_inum = cpu_to_le32(ea_ino);
+++			s->here->e_value_offs = 0;
+++		} else if (i->value_len) {
++ 			size_t size = EXT4_XATTR_SIZE(i->value_len);
++ 			void *val = s->base + min_offs - size;
++ 			s->here->e_value_offs = cpu_to_le16(min_offs - size);
+++			s->here->e_value_inum = 0;
++ 			memset(val + size - EXT4_XATTR_PAD, 0,
++ 			       EXT4_XATTR_PAD); /* Clear the pad bytes. */
++ 			memcpy(val, i->value, i->value_len);
++@@ -673,7 +962,7 @@
++ 		bs->s.end = bs->bh->b_data + bs->bh->b_size;
++ 		bs->s.here = bs->s.first;
++ 		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
++-					      i->name, bs->bh->b_size, 1);
+++					     i->name, bs->bh->b_size, 1, inode);
++ 		if (error && error != -ENODATA)
++ 			goto cleanup;
++ 		bs->s.not_found = error;
++@@ -697,8 +986,6 @@
++ 
++ #define header(x) ((struct ext4_xattr_header *)(x))
++ 
++-	if (i->value && i->value_len > sb->s_blocksize)
++-		return -ENOSPC;
++ 	if (s->base) {
++ 		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
++ 					bs->bh->b_blocknr);
++@@ -713,7 +1000,7 @@
++ 				ce = NULL;
++ 			}
++ 			ea_bdebug(bs->bh, "modifying in-place");
++-			error = ext4_xattr_set_entry(i, s);
+++			error = ext4_xattr_set_entry(i, s, handle, inode);
++ 			if (!error) {
++ 				if (!IS_LAST_ENTRY(s->first))
++ 					ext4_xattr_rehash(header(s->base),
++@@ -765,7 +1052,7 @@
++ 		s->end = s->base + sb->s_blocksize;
++ 	}
++ 
++-	error = ext4_xattr_set_entry(i, s);
+++	error = ext4_xattr_set_entry(i, s, handle, inode);
++ 	if (error == -EIO)
++ 		goto bad_block;
++ 	if (error)
++@@ -910,7 +1197,7 @@
++ 		/* Find the named attribute. */
++ 		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
++ 					      i->name, is->s.end -
++-					      (void *)is->s.base, 0);
+++					      (void *)is->s.base, 0, inode);
++ 		if (error && error != -ENODATA)
++ 			return error;
++ 		is->s.not_found = error;
++@@ -929,7 +1216,7 @@
++ 
++ 	if (EXT4_I(inode)->i_extra_isize == 0)
++ 		return -ENOSPC;
++-	error = ext4_xattr_set_entry(i, s);
+++	error = ext4_xattr_set_entry(i, s, handle, inode);
++ 	if (error)
++ 		return error;
++ 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
++@@ -965,7 +1252,7 @@
++ 		.name = name,
++ 		.value = value,
++ 		.value_len = value_len,
++-
+++		.in_inode = 0,
++ 	};
++ 	struct ext4_xattr_ibody_find is = {
++ 		.s = { .not_found = -ENODATA, },
++@@ -1034,6 +1321,15 @@
++ 					goto cleanup;
++ 			}
++ 			error = ext4_xattr_block_set(handle, inode, &i, &bs);
+++			if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+++					EXT4_FEATURE_INCOMPAT_EA_INODE) &&
+++			    error == -ENOSPC) {
+++				/* xattr does not fit in the block, store
+++				 * it in an external inode */
+++				i.in_inode = 1;
+++				error = ext4_xattr_ibody_set(handle, inode,
+++							     &i, &is);
+++			}
++ 			if (error)
++ 				goto cleanup;
++ 			if (!is.s.not_found) {
++@@ -1081,10 +1377,25 @@
++ 	       const void *value, size_t value_len, int flags)
++ {
++ 	handle_t *handle;
+++	struct super_block *sb = inode->i_sb;
+++	int buffer_credits;
++ 	int error, retries = 0;
++ 
+++	buffer_credits = EXT4_DATA_TRANS_BLOCKS(sb);
+++	if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
+++	    EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
+++		int nrblocks = (value_len + sb->s_blocksize - 1) >>
+++					sb->s_blocksize_bits;
+++
+++		/* For new inode */
+++		buffer_credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
+++
+++		/* For data blocks of EA inode */
+++		buffer_credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
+++	}
+++
++ retry:
++-	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+++	handle = ext4_journal_start(inode, buffer_credits);
++ 	if (IS_ERR(handle)) {
++ 		error = PTR_ERR(handle);
++ 	} else {
++@@ -1094,7 +1405,7 @@
++ 					      value, value_len, flags);
++ 		error2 = ext4_journal_stop(handle);
++ 		if (error == -ENOSPC &&
++-		    ext4_should_retry_alloc(inode->i_sb, &retries))
+++		    ext4_should_retry_alloc(sb, &retries))
++ 			goto retry;
++ 		if (error == 0)
++ 			error = error2;
++@@ -1116,7 +1427,7 @@
++ 
++ 	/* Adjust the value offsets of the entries */
++ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
++-		if (!last->e_value_block && last->e_value_size) {
+++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
++ 			new_offs = le16_to_cpu(last->e_value_offs) +
++ 							value_offs_shift;
++ 			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
++@@ -1355,15 +1666,41 @@
++ /*
++  * ext4_xattr_delete_inode()
++  *
++- * Free extended attribute resources associated with this inode. This
+++ * Free extended attribute resources associated with this inode. Traverse
+++ * all entries and unlink any xattr inodes associated with this inode. This
++  * is called immediately before an inode is freed. We have exclusive
++- * access to the inode.
+++ * access to the inode. If an orphan inode is deleted it will also delete any
+++ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
+++ * to ensure they belong to the parent inode and were not deleted already.
++  */
++ void
++ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
++ {
++ 	struct buffer_head *bh = NULL;
+++	struct ext4_xattr_ibody_header *header;
+++	struct ext4_inode *raw_inode;
+++	struct ext4_iloc iloc;
+++	struct ext4_xattr_entry *entry;
+++	int error;
+++
+++	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
+++		goto delete_external_ea;
+++
+++	error = ext4_get_inode_loc(inode, &iloc);
+++	if (error)
+++		goto cleanup;
+++	raw_inode = ext4_raw_inode(&iloc);
+++	header = IHDR(inode, raw_inode);
+++	entry = IFIRST(header);
+++	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+++		if (entry->e_value_inum != 0) {
+++			ext4_xattr_inode_unlink(inode,
+++					le32_to_cpu(entry->e_value_inum));
+++			entry->e_value_inum = 0;
+++		}
+++	}
++ 
+++delete_external_ea:
++ 	if (!EXT4_I(inode)->i_file_acl)
++ 		goto cleanup;
++ 	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
++@@ -1380,6 +1717,16 @@
++ 			EXT4_I(inode)->i_file_acl);
++ 		goto cleanup;
++ 	}
+++
+++	entry = BFIRST(bh);
+++	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+++		if (entry->e_value_inum != 0) {
+++			ext4_xattr_inode_unlink(inode,
+++					le32_to_cpu(entry->e_value_inum));
+++			entry->e_value_inum = 0;
+++		}
+++	}
+++
++ 	ext4_xattr_release_block(handle, inode, bh);
++ 	EXT4_I(inode)->i_file_acl = 0;
++ 
++@@ -1454,10 +1801,9 @@
++ 		    entry1->e_name_index != entry2->e_name_index ||
++ 		    entry1->e_name_len != entry2->e_name_len ||
++ 		    entry1->e_value_size != entry2->e_value_size ||
+++		    entry1->e_value_inum != entry2->e_value_inum ||
++ 		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
++ 			return 1;
++-		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
++-			return -EIO;
++ 		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
++ 			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
++ 			   le32_to_cpu(entry1->e_value_size)))
++@@ -1542,7 +1888,7 @@
++ 		       *name++;
++ 	}
++ 
++-	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+++	if (entry->e_value_inum == 0 && entry->e_value_size != 0) {
++ 		__le32 *value = (__le32 *)((char *)header +
++ 			le16_to_cpu(entry->e_value_offs));
++ 		for (n = (le32_to_cpu(entry->e_value_size) +
++Index: linux-source-2.6.32/fs/ext4/xattr.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/xattr.h	2012-06-28 12:08:14.717669056 +0200
+++++ linux-source-2.6.32/fs/ext4/xattr.h	2012-06-28 12:11:20.321664768 +0200
++@@ -38,7 +38,7 @@
++ 	__u8	e_name_len;	/* length of name */
++ 	__u8	e_name_index;	/* attribute name index */
++ 	__le16	e_value_offs;	/* offset in disk block of value */
++-	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
+++	__le32	e_value_inum;	/* inode in which the value is stored */
++ 	__le32	e_value_size;	/* size of attribute value */
++ 	__le32	e_hash;		/* hash value of name and value */
++ 	char	e_name[0];	/* attribute name */
++@@ -63,6 +63,15 @@
++ 		EXT4_I(inode)->i_extra_isize))
++ #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
++ 
+++#define i_xattr_inode_parent i_mtime.tv_sec
+++
+++/*
+++ * The minimum size of an EA value before it is stored in an external inode:
+++ * size of block - size of header - size of 1 entry - 4 null bytes
+++ */
+++#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b)					\
+++	((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
+++
++ # ifdef CONFIG_EXT4_FS_XATTR
++ 
++ extern struct xattr_handler ext4_xattr_user_handler;
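
Worked numbers for the two thresholds above, assuming 4 KiB blocks, the
32-byte struct ext4_xattr_header, and the 16-byte struct ext4_xattr_entry
(so EXT4_XATTR_LEN(3) = (3 + 16 + 3) & ~3 = 20):

    /* EXT4_XATTR_MIN_LARGE_EA_SIZE(4096) = 4096 - 20 - 32 - 4 = 4040,
     * so a value too big to sit beside one minimal entry in a 4 KiB xattr
     * block moves to its own EA inode, and any single value is capped at
     * EXT4_XATTR_MAX_LARGE_EA_SIZE = 1048576 bytes (1 MiB). */
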
+diff --git a/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..70c0f57
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch
+@@ -0,0 +1,43 @@
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:09:27.373665875 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:03.433668774 +0200
++@@ -1098,6 +1098,38 @@
++ 			}
++ 		}
++ 	}
+++	/* ".." shouldn't go into the dcache, to preserve the dcache
+++	 * hierarchy; otherwise we'd get the parent being a child of its
+++	 * actual child. See bug 10458 for details. -bzzz */
+++	if (inode && (dentry->d_name.name[0] == '.' && (dentry->d_name.len == 1 ||
+++		(dentry->d_name.len == 2 && dentry->d_name.name[1] == '.')))) {
+++		struct dentry *tmp, *goal = NULL;
+++		struct list_head *lp;
+++
+++		/* first, look for an existing dentry - any one is good */
+++		spin_lock(&dcache_lock);
+++		list_for_each(lp, &inode->i_dentry) {
+++			tmp = list_entry(lp, struct dentry, d_alias);
+++			goal = tmp;
+++			dget_locked(goal);
+++			break;
+++		}
+++		if (goal == NULL) {
+++			/* there is no alias, we need to make current dentry:
+++			 *  a) inaccessible for __d_lookup()
+++			 *  b) inaccessible for iopen */
+++			J_ASSERT(list_empty(&dentry->d_alias));
+++			dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+++			/* this is d_instantiate() ... */
+++			list_add(&dentry->d_alias, &inode->i_dentry);
+++			dentry->d_inode = inode;
+++		}
+++		spin_unlock(&dcache_lock);
+++		if (goal)
+++			iput(inode);
+++		return goal;
+++	}
+++
++ 	return d_splice_alias(inode, dentry);
++ }
++ 
+diff --git a/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..f260449
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch
+@@ -0,0 +1,87 @@
++Index: linux-source-2.6.32/fs/ext4/inode.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:08:38.957670045 +0200
+++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:09:14.201666378 +0200
++@@ -5905,3 +5905,67 @@
++ 	up_read(&inode->i_alloc_sem);
++ 	return ret;
++ }
+++
+++int ext4_map_inode_page(struct inode *inode, struct page *page,
+++			unsigned long *blocks, int *created, int create)
+++{
+++	unsigned int blocksize, blocks_per_page;
+++	unsigned long iblock;
+++	struct buffer_head dummy;
+++	void *handle;
+++	int i, rc = 0, failed = 0, needed_blocks;
+++
+++	blocksize = inode->i_sb->s_blocksize;
+++	blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
+++	iblock = page->index * blocks_per_page;
+++
+++	for (i = 0; i < blocks_per_page; i++, iblock++) {
+++		blocks[i] = ext4_bmap(inode->i_mapping, iblock);
+++		if (blocks[i] == 0) {
+++			failed++;
+++			if (created)
+++				created[i] = -1;
+++		} else if (created) {
+++			created[i] = 0;
+++		}
+++	}
+++
+++	if (failed == 0 || create == 0)
+++		return 0;
+++
+++	needed_blocks = ext4_writepage_trans_blocks(inode);
+++	handle = ext4_journal_start(inode, needed_blocks);
+++	if (IS_ERR(handle))
+++		return PTR_ERR(handle);
+++
+++	iblock = page->index * blocks_per_page;
+++	for (i = 0; i < blocks_per_page; i++, iblock++) {
+++		if (blocks[i] != 0)
+++			continue;
+++
+++		rc = ext4_ind_get_blocks(handle, inode, iblock, 1, &dummy,
+++					 EXT4_GET_BLOCKS_CREATE);
+++		if (rc < 0) {
+++			printk(KERN_INFO "ext4_map_inode_page: error mapping "
+++					"block %lu\n", iblock);
+++			goto out;
+++		} else {
+++			if (rc > 1)
+++				WARN_ON(1);
+++			rc = 0;
+++		}
+++		/* Unmap any metadata buffers from the block mapping, to avoid
+++		 * data corruption due to direct-write from Lustre being
+++		 * clobbered by a later flush of the blockdev metadata buffer. */
+++		if (buffer_new(&dummy))
+++			unmap_underlying_metadata(dummy.b_bdev,
+++					dummy.b_blocknr);
+++		blocks[i] = dummy.b_blocknr;
+++		if (created)
+++			created[i] = 1;
+++	}
+++
+++out:
+++	ext4_journal_stop(handle);
+++	return rc;
+++}
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:08:39.101667098 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:14.205662995 +0200
++@@ -4052,6 +4052,10 @@
++ 	exit_ext4_system_zone();
++ }
++ 
+++int ext4_map_inode_page(struct inode *inode, struct page *page,
+++			unsigned long *blocks, int *created, int create);
+++EXPORT_SYMBOL(ext4_map_inode_page);
+++
++ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
++ MODULE_DESCRIPTION("Fourth Extended Filesystem");
++ MODULE_LICENSE("GPL");
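
Usage sketch (not part of the patchset): ext4_map_inode_page() is exported for
callers outside ext4 proper; Lustre's ldiskfs glue is the intended consumer. A
hypothetical wrapper, assuming 4 KiB pages and blocks of at least 512 bytes:

    /* create = 1: start a handle and allocate any still-unmapped blocks;
     * blocks[i] receives physical block numbers, created[i] records which
     * ones were newly allocated (1), already mapped (0), or holes (-1). */
    static int map_one_page(struct inode *inode, struct page *page)
    {
            unsigned long blocks[PAGE_SIZE >> 9];
            int created[PAGE_SIZE >> 9];

            return ext4_map_inode_page(inode, page, blocks, created, 1);
    }
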
+diff --git a/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..2a0d595
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch
+@@ -0,0 +1,67 @@
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:34.581668116 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:06.381677398 +0200
++@@ -818,11 +818,15 @@
++ 	sb = dir->i_sb;
++ 	ngroups = ext4_get_groups_count(sb);
++ 	trace_ext4_request_inode(dir, mode);
+++
+++	sbi = EXT4_SB(sb);
+++	if (sbi->s_max_dir_size > 0 && i_size_read(dir) >= sbi->s_max_dir_size)
+++		return ERR_PTR(-EFBIG);
+++
++ 	inode = new_inode(sb);
++ 	if (!inode)
++ 		return ERR_PTR(-ENOMEM);
++ 	ei = EXT4_I(inode);
++-	sbi = EXT4_SB(sb);
++ 
++ 	if (!goal)
++ 		goal = sbi->s_inode_goal;
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:59.693721980 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:06.381677398 +0200
++@@ -2239,6 +2239,7 @@
++ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
++ 		 inode_readahead_blks_store, s_inode_readahead_blks);
++ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+++EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size);
++ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
++ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
++ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
++@@ -2253,6 +2254,7 @@
++ 	ATTR_LIST(lifetime_write_kbytes),
++ 	ATTR_LIST(inode_readahead_blks),
++ 	ATTR_LIST(inode_goal),
+++	ATTR_LIST(max_dir_size),
++ 	ATTR_LIST(mb_stats),
++ 	ATTR_LIST(mb_max_to_scan),
++ 	ATTR_LIST(mb_min_to_scan),
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:59.689673296 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:06.385666114 +0200
++@@ -1107,6 +1107,8 @@
++ 	unsigned int s_log_groups_per_flex;
++ 	struct flex_groups *s_flex_groups;
++ 
+++	unsigned long s_max_dir_size;
+++
++ 	/* workqueue for dio unwritten */
++ 	struct workqueue_struct *dio_unwritten_wq;
++ 
++@@ -1495,6 +1497,12 @@
++ #define EXT4_MMP_MAX_CHECK_INTERVAL    300UL
++ 
++ /*
+++ * max directory size tunable
+++ */
+++#define EXT4_DEFAULT_MAX_DIR_SIZE	0
+++#define EXT4_MAX_DIR_SIZE_NAME		"max_dir_size"
+++
+++/*
++  * Function prototypes
++  */
++ 
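
The limit is exposed through the max_dir_size attribute registered in super.c
above; once it is non-zero, ext4_new_inode() fails with -EFBIG for any parent
directory whose i_size has reached it. A sketch of setting it from user space
(device name and value are examples only):

    #include <stdio.h>

    int main(void)
    {
            /* cap directories on sda1 at 16 MB; the value is in bytes and
             * is compared against the directory's i_size */
            FILE *f = fopen("/sys/fs/ext4/sda1/max_dir_size", "w");

            if (f == NULL)
                    return 1;
            fprintf(f, "%lu\n", 16UL << 20);
            return fclose(f) != 0;
    }
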
+diff --git a/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..19f6b57
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch
+@@ -0,0 +1,317 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:15.325667336 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:19.989662510 +0200
++@@ -1850,6 +1850,7 @@
++ 	ext4_grpblk_t	bb_fragments;	/* nr of freespace fragments */
++ 	ext4_grpblk_t	bb_largest_free_order;/* order of largest frag in BG */
++ 	struct          list_head bb_prealloc_list;
+++	unsigned long   bb_prealloc_nr;
++ #ifdef DOUBLE_CHECK
++ 	void            *bb_bitmap;
++ #endif
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:10:15.333660398 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:19.997669436 +0200
++@@ -337,7 +337,7 @@
++ static struct kmem_cache *ext4_pspace_cachep;
++ static struct kmem_cache *ext4_ac_cachep;
++ static struct kmem_cache *ext4_free_ext_cachep;
++-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+++static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++ 					ext4_group_t group);
++ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
++ 						ext4_group_t group);
++@@ -680,7 +680,7 @@
++ }
++ 
++ static noinline_for_stack
++-void ext4_mb_generate_buddy(struct super_block *sb,
+++int ext4_mb_generate_buddy(struct super_block *sb,
++ 				void *buddy, void *bitmap, ext4_group_t group)
++ {
++ 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
++@@ -712,14 +712,13 @@
++ 	grp->bb_fragments = fragments;
++ 
++ 	if (free != grp->bb_free) {
++-		ext4_grp_locked_error(sb, group,  __func__,
++-			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
++-			group, free, grp->bb_free);
++-		/*
++-		 * If we intent to continue, we consider group descritor
++-		 * corrupt and update bb_free using bitmap value
++-		 */
++-		grp->bb_free = free;
+++		struct ext4_group_desc *gdp;
+++		gdp = ext4_get_group_desc (sb, group, NULL);
+++		ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
+++			"%u in gd, %lu pa's\n", (long unsigned int)group,
+++			free, grp->bb_free, ext4_free_blks_count(sb, gdp),
+++			grp->bb_prealloc_nr);
+++		return -EIO;
++ 	}
++ 	mb_set_largest_free_order(sb, grp);
++ 
++@@ -730,6 +729,8 @@
++ 	EXT4_SB(sb)->s_mb_buddies_generated++;
++ 	EXT4_SB(sb)->s_mb_generation_time += period;
++ 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
+++
+++	return 0;
++ }
++ 
++ /* The buddy information is attached the buddy cache inode
++@@ -864,7 +865,7 @@
++ 	first_block = page->index * blocks_per_page;
++ 	/* init the page  */
++ 	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
++-	for (i = 0; i < blocks_per_page; i++) {
+++	for (i = 0; i < blocks_per_page && err == 0; i++) {
++ 		int group;
++ 		struct ext4_group_info *grinfo;
++ 
++@@ -899,7 +900,7 @@
++ 			 * incore got set to the group block bitmap below
++ 			 */
++ 			ext4_lock_group(sb, group);
++-			ext4_mb_generate_buddy(sb, data, incore, group);
+++			err = ext4_mb_generate_buddy(sb, data, incore, group);
++ 			ext4_unlock_group(sb, group);
++ 			incore = NULL;
++ 		} else {
++@@ -913,7 +914,7 @@
++ 			memcpy(data, bitmap, blocksize);
++ 
++ 			/* mark all preallocated blks used in in-core bitmap */
++-			ext4_mb_generate_from_pa(sb, data, group);
+++			err = ext4_mb_generate_from_pa(sb, data, group);
++ 			ext4_mb_generate_from_freelist(sb, data, group);
++ 			ext4_unlock_group(sb, group);
++ 
++@@ -923,7 +924,8 @@
++ 			incore = data;
++ 		}
++ 	}
++-	SetPageUptodate(page);
+++	if (likely(err == 0))
+++		SetPageUptodate(page);
++ 
++ out:
++ 	if (bh) {
++@@ -2184,9 +2186,11 @@
++ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
++ {
++ 	struct super_block *sb = seq->private;
+++	struct ext4_group_desc *gdp;
++ 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
++ 	int i;
++ 	int err;
+++	int free = 0;
++ 	struct ext4_buddy e4b;
++ 	struct sg {
++ 		struct ext4_group_info info;
++@@ -2195,10 +2199,10 @@
++ 
++ 	group--;
++ 	if (group == 0)
++-		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
+++		seq_printf(seq, "#%-5s: %-5s %-5s %-5s %-5s %-5s"
++ 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
++ 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
++-			   "group", "free", "frags", "first",
+++			   "group", "free", "free", "frags", "first", "pa",
++ 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
++ 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
++ 
++@@ -2209,13 +2213,20 @@
++ 		seq_printf(seq, "#%-5u: I/O error\n", group);
++ 		return 0;
++ 	}
+++
+++	gdp = ext4_get_group_desc(sb, group, NULL);
+++	if (gdp != NULL)
+++		free = ext4_free_blks_count(sb, gdp);
+++
++ 	ext4_lock_group(sb, group);
++ 	memcpy(&sg, ext4_get_group_info(sb, group), i);
++ 	ext4_unlock_group(sb, group);
++ 	ext4_mb_unload_buddy(&e4b);
++ 
++-	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
++-			sg.info.bb_fragments, sg.info.bb_first_free);
+++	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
+++			(long unsigned int)group, sg.info.bb_free, free,
+++			sg.info.bb_fragments, sg.info.bb_first_free,
+++			sg.info.bb_prealloc_nr);
++ 	for (i = 0; i <= 13; i++)
++ 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
++ 				sg.info.bb_counters[i] : 0);
++@@ -3398,23 +3409,68 @@
++ }
++ 
++ /*
+++ * check that the free blocks in the bitmap match the free block count
+++ * in the group descriptor. Do this before taking preallocated blocks
+++ * into account to be able to detect on-disk corruption. The group lock
+++ * must be held by the caller.
+++ */
+++int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
+++				struct ext4_group_desc *gdp, int group)
+++{
+++	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
+++	unsigned short i, first, free = 0;
+++
+++	i = mb_find_next_zero_bit(bitmap, max, 0);
+++
+++	while (i < max) {
+++		first = i;
+++		i = mb_find_next_bit(bitmap, max, i);
+++		if (i > max)
+++			i = max;
+++		free += i - first;
+++		if (i < max)
+++			i = mb_find_next_zero_bit(bitmap, max, i);
+++	}
+++
+++	if (free != ext4_free_blks_count(sb, gdp)) {
+++		ext4_error(sb, "on-disk bitmap for group %d "
+++			"corrupted: %u blocks free in bitmap, %u - in gd\n",
+++			group, free, ext4_free_blks_count(sb, gdp));
+++		return -EIO;
+++	}
+++	return 0;
+++}
+++
+++/*
++  * the function goes through all preallocation in this group and marks them
++  * used in in-core bitmap. buddy must be generated from this bitmap
++  * Need to be called with ext4 group lock held
++  */
++ static noinline_for_stack
++-void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+++int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++ 					ext4_group_t group)
++ {
++ 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
++ 	struct ext4_prealloc_space *pa;
+++	struct ext4_group_desc *gdp;
++ 	struct list_head *cur;
++ 	ext4_group_t groupnr;
++ 	ext4_grpblk_t start;
++ 	int preallocated = 0;
++ 	int count = 0;
+++	int skip = 0;
+++	int err;
++ 	int len;
++ 
+++	gdp = ext4_get_group_desc (sb, group, NULL);
+++	if (gdp == NULL)
+++		return -EIO;
+++
+++	/* before applying preallocations, check bitmap consistency */
+++	err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
+++	if (err)
+++		return err;
+++
++ 	/* all form of preallocation discards first load group,
++ 	 * so the only competing code is preallocation use.
++ 	 * we don't need any locking here
++@@ -3430,14 +3486,23 @@
++ 					     &groupnr, &start);
++ 		len = pa->pa_len;
++ 		spin_unlock(&pa->pa_lock);
++-		if (unlikely(len == 0))
+++		if (unlikely(len == 0)) {
+++			skip++;
++ 			continue;
+++		}
++ 		BUG_ON(groupnr != group);
++ 		mb_set_bits(bitmap, start, len);
++ 		preallocated += len;
++ 		count++;
++ 	}
+++	if (count + skip != grp->bb_prealloc_nr) {
+++		ext4_error(sb, "lost preallocations: "
+++			   "count %d, bb_prealloc_nr %lu, skip %d\n",
+++			   count, grp->bb_prealloc_nr, skip);
+++		return -EIO;
+++	}
++ 	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
+++	return 0;
++ }
++ 
++ static void ext4_mb_pa_callback(struct rcu_head *head)
++@@ -3496,6 +3561,7 @@
++ 	 */
++ 	ext4_lock_group(sb, grp);
++ 	list_del(&pa->pa_group_list);
+++	ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
++ 	ext4_unlock_group(sb, grp);
++ 
++ 	spin_lock(pa->pa_obj_lock);
++@@ -3587,6 +3653,7 @@
++ 
++ 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
++ 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
+++	grp->bb_prealloc_nr++;
++ 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
++ 
++ 	spin_lock(pa->pa_obj_lock);
++@@ -3648,6 +3715,7 @@
++ 
++ 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
++ 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
+++	grp->bb_prealloc_nr++;
++ 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
++ 
++ 	/*
++@@ -3848,6 +3916,8 @@
++ 
++ 		spin_unlock(&pa->pa_lock);
++ 
+++		BUG_ON(grp->bb_prealloc_nr == 0);
+++		grp->bb_prealloc_nr--;
++ 		list_del(&pa->pa_group_list);
++ 		list_add(&pa->u.pa_tmp_list, &list);
++ 	}
++@@ -3988,7 +4058,7 @@
++ 		if (err) {
++ 			ext4_error(sb, __func__, "Error in loading buddy "
++ 					"information for %u", group);
++-			continue;
+++			return;
++ 		}
++ 
++ 		bitmap_bh = ext4_read_block_bitmap(sb, group);
++@@ -4000,6 +4070,8 @@
++ 		}
++ 
++ 		ext4_lock_group(sb, group);
+++		BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
+++		e4b.bd_info->bb_prealloc_nr--;
++ 		list_del(&pa->pa_group_list);
++ 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
++ 		ext4_unlock_group(sb, group);
++@@ -4273,6 +4345,7 @@
++ 		}
++ 		ext4_lock_group(sb, group);
++ 		list_del(&pa->pa_group_list);
+++		ext4_get_group_info(sb, group)->bb_prealloc_nr--;
++ 		ext4_mb_release_group_pa(&e4b, pa, ac);
++ 		ext4_unlock_group(sb, group);
++ 
++Index: linux-source-2.6.32/fs/ext4/mballoc.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:08:28.261665077 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:10:19.997669436 +0200
++@@ -88,7 +88,7 @@
++ /*
++  * for which requests use 2^N search using buddies
++  */
++-#define MB_DEFAULT_ORDER2_REQS		2
+++#define MB_DEFAULT_ORDER2_REQS		8
++ 
++ /*
++  * default group prealloc size 512 blocks
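
A note on the check this patch adds (an illustration, not part of the
patchset): ext4_mb_check_ondisk_bitmap() boils down to counting the zero
bits in the on-disk block bitmap and comparing the result with the
free-block count the group descriptor claims; any disagreement is now
reported and turned into -EIO instead of being papered over in memory.
A minimal userspace sketch of that comparison, where count_free(), the
sample bitmap and the bogus descriptor value are all invented for the
example:

#include <stdio.h>

/* count zero bits, i.e. free blocks; a set bit means "block in use" */
static unsigned int count_free(const unsigned char *bitmap,
			       unsigned int nbits)
{
	unsigned int i, nfree = 0;

	for (i = 0; i < nbits; i++)
		if (!(bitmap[i >> 3] & (1u << (i & 7))))
			nfree++;
	return nfree;
}

int main(void)
{
	/* 32 blocks, the first 12 in use, so 20 are actually free */
	const unsigned char bitmap[4] = { 0xff, 0x0f, 0x00, 0x00 };
	unsigned int gd_free = 21;	/* bogus value from the descriptor */
	unsigned int nfree = count_free(bitmap, 32);

	if (nfree != gd_free)
		printf("group corrupted: %u free in bitmap, %u in gd\n",
		       nfree, gd_free);
	return 0;
}

Run as-is it reports the mismatch, since the sample bitmap holds 20 free
blocks while the descriptor claims 21: the same situation the patch above
now treats as corruption rather than silently updating bb_free.
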
+diff --git a/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..0349db6
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch
+@@ -0,0 +1,111 @@
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:06.761669638 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:12.521665249 +0200
++@@ -3637,6 +3637,7 @@
++ 	INIT_LIST_HEAD(&pa->pa_group_list);
++ 	pa->pa_deleted = 0;
++ 	pa->pa_type = MB_INODE_PA;
+++	pa->pa_error = 0;
++ 
++ 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
++ 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
++@@ -3698,6 +3699,7 @@
++ 	INIT_LIST_HEAD(&pa->pa_group_list);
++ 	pa->pa_deleted = 0;
++ 	pa->pa_type = MB_GROUP_PA;
+++	pa->pa_error = 0;
++ 
++ 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
++ 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
++@@ -3760,7 +3762,9 @@
++ 	int err = 0;
++ 	int free = 0;
++ 
+++	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
++ 	BUG_ON(pa->pa_deleted == 0);
+++	BUG_ON(pa->pa_inode == NULL);
++ 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
++ 	grp_blk_start = pa->pa_pstart - bit;
++ 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
++@@ -3796,19 +3800,27 @@
++ 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
++ 		bit = next + 1;
++ 	}
++-	if (free != pa->pa_free) {
++-		printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
++-			pa, (unsigned long) pa->pa_lstart,
++-			(unsigned long) pa->pa_pstart,
++-			(unsigned long) pa->pa_len);
+++
+++	/* "free < pa->pa_free" means we may have double-allocated the same
+++	 * blocks; otherwise free blocks may be left unavailable. No need to BUG. */
+++	if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
+++		ext4_error(sb, "pa free mismatch: [pa %p] "
+++				"[phy %lu] [logic %lu] [len %u] [free %u] "
+++				"[error %u] [inode %lu] [freed %u]", pa,
+++				(unsigned long)pa->pa_pstart,
+++				(unsigned long)pa->pa_lstart,
+++				(unsigned)pa->pa_len, (unsigned)pa->pa_free,
+++				(unsigned)pa->pa_error, pa->pa_inode->i_ino,
+++				free);
++ 		ext4_grp_locked_error(sb, group,
++-					__func__, "free %u, pa_free %u",
++-					free, pa->pa_free);
+++				__func__, "free %u, pa_free %u",
+++				free, pa->pa_free);
++ 		/*
++ 		 * pa is already deleted so we use the value obtained
++ 		 * from the bitmap and continue.
++ 		 */
++ 	}
+++	BUG_ON(pa->pa_free != free);
++ 	atomic_add(free, &sbi->s_mb_discarded);
++ 
++ 	return err;
++@@ -4575,6 +4587,25 @@
++ 			ac->ac_b_ex.fe_len = 0;
++ 			ar->len = 0;
++ 			ext4_mb_show_ac(ac);
+++			if (ac->ac_pa) {
+++				struct ext4_prealloc_space *pa = ac->ac_pa;
+++
+++				/* We cannot tell whether the bitmap was
+++				 * updated before the failure, so we cannot
+++				 * revert pa_free; just mark pa_error. */
+++				pa->pa_error++;
+++				ext4_error(sb,
+++					"Updating bitmap error: [err %d] "
+++					"[pa %p] [phy %lu] [logic %lu] "
+++					"[len %u] [free %u] [error %u] "
+++					"[inode %lu]", *errp, pa,
+++					(unsigned long)pa->pa_pstart,
+++					(unsigned long)pa->pa_lstart,
+++					(unsigned)pa->pa_len,
+++					(unsigned)pa->pa_free,
+++					(unsigned)pa->pa_error,
+++					pa->pa_inode ? pa->pa_inode->i_ino : 0);
+++			}
++ 		} else {
++ 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
++ 			ar->len = ac->ac_b_ex.fe_len;
++Index: linux-source-2.6.32/fs/ext4/mballoc.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:11:06.641723341 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:11:12.525665254 +0200
++@@ -20,6 +20,7 @@
++ #include <linux/version.h>
++ #include <linux/blkdev.h>
++ #include <linux/mutex.h>
+++#include <linux/genhd.h>
++ #include "ext4_jbd2.h"
++ #include "ext4.h"
++ 
++@@ -130,6 +131,7 @@
++ 	ext4_grpblk_t		pa_free;	/* how many blocks are free */
++ 	unsigned short		pa_type;	/* pa type. inode or group */
++ 	spinlock_t		*pa_obj_lock;
+++	unsigned short		pa_error;
++ 	struct inode		*pa_inode;	/* hack, for history only */
++ };
++ 
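
The interesting part of the hunk above is the mismatch predicate: a
surplus of freed blocks (free > pa_free) is tolerated when an earlier
bitmap update already failed (pa_error set), because pa_free could not be
safely reverted at that point; a deficit (free < pa_free) is always
reported, since it can mean the same blocks were handed out twice. A tiny
truth-table sketch of that predicate (userspace, values invented):

#include <stdio.h>

/* 1 = mismatch worth reporting, 0 = consistent or already explained */
static int pa_mismatch(unsigned int freed, unsigned int pa_free,
		       unsigned int pa_error)
{
	return (freed > pa_free && !pa_error) || (freed < pa_free);
}

int main(void)
{
	printf("%d\n", pa_mismatch(5, 4, 0));	/* 1: surplus, unexplained */
	printf("%d\n", pa_mismatch(5, 4, 1));	/* 0: surplus after a failed
						 *    bitmap update */
	printf("%d\n", pa_mismatch(3, 4, 1));	/* 1: deficit is always bad */
	printf("%d\n", pa_mismatch(4, 4, 0));	/* 0: counts agree */
	return 0;
}
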
+diff --git a/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..50f452e
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch
+@@ -0,0 +1,257 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:19.989662510 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:23.325664479 +0200
++@@ -1182,6 +1182,9 @@
++ 
++ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
++ 
+++/* Has been moved to linux/magic.h but we need it for Lustre */
+++#define EXT4_SUPER_MAGIC	0xEF53
+++
++ /*
++  * Codes for operating systems
++  */
++@@ -1608,6 +1611,9 @@
++ extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
++ extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
++ 						ext4_group_t, int);
+++extern void ext4_mb_discard_inode_preallocations(struct inode *);
+++
+++
++ /* inode.c */
++ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
++ 		struct buffer_head *bh, ext4_fsblk_t blocknr);
++Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:09:30.485668675 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:10:23.325664479 +0200
++@@ -58,6 +58,12 @@
++  */
++ #define EXT_STATS_
++ 
+++/*
+++ * define EXT4_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb
+++ * are now accounted in ext4_ext_calc_credits_for_insert()
+++ */
+++#define EXT4_ALLOC_NEEDED 0
+++#define HAVE_EXT_PREPARE_CB_EXTENT
++ 
++ /*
++  * ext4_inode has i_block array (60 bytes total).
++@@ -160,6 +166,7 @@
++ #define EXT_INIT_MAX_LEN	(1UL << 15)
++ #define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
++ 
+++#define EXT4_EXT_HAS_NO_TREE	/* ext4_extents_tree struct is not used*/
++ 
++ #define EXT_FIRST_EXTENT(__hdr__) \
++ 	((struct ext4_extent *) (((char *) (__hdr__)) +		\
++@@ -239,6 +246,8 @@
++ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
++ 						   int num,
++ 						   struct ext4_ext_path *path);
+++extern int ext4_ext_calc_credits_for_insert(struct inode *,
+++					    struct ext4_ext_path *);
++ extern int ext4_can_extents_be_merged(struct inode *inode,
++ 				      struct ext4_extent *ex1,
++ 				      struct ext4_extent *ex2);
++Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.c	2012-06-28 12:08:27.121667674 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.c	2012-06-28 12:10:23.325664479 +0200
++@@ -31,6 +31,7 @@
++ 	}
++ 	return err;
++ }
+++EXPORT_SYMBOL(__ext4_journal_get_write_access);
++ 
++ int __ext4_journal_forget(const char *where, handle_t *handle,
++ 				struct buffer_head *bh)
++@@ -107,3 +108,4 @@
++ 	}
++ 	return err;
++ }
+++EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
++Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:08:26.825663990 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:23.325664479 +0200
++@@ -35,6 +35,8 @@
++ 	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
++ 	 ? 27U : 8U)
++ 
+++#define ext4_journal_dirty_metadata(handle, bh)  \
+++		ext4_handle_dirty_metadata(handle, NULL, bh)
++ /* Extended attribute operations touch at most two data buffers,
++  * two bitmap buffers, and two group summaries, in addition to the inode
++  * and the superblock, which are already accounted for. */
++Index: linux-source-2.6.32/fs/ext4/extents.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:09:30.489673456 +0200
+++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:10:23.329669991 +0200
++@@ -2030,6 +2030,55 @@
++ }
++ 
++ /*
+++ * This routine returns max. credits extent tree can consume.
+++ * It should be OK for low-performance paths like ->writepage()
+++ * To allow many writing process to fit a single transaction,
+++ * caller should calculate credits under truncate_mutex and
+++ * pass actual path.
+++ */
+++int ext4_ext_calc_credits_for_insert(struct inode *inode,
+++				     struct ext4_ext_path *path)
+++{
+++	int depth, needed;
+++
+++	if (path) {
+++		/* probably there is space in leaf? */
+++		depth = ext_depth(inode);
+++		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
+++				< le16_to_cpu(path[depth].p_hdr->eh_max))
+++			return 1;
+++	}
+++
+++	/*
+++	 * given 32bit logical block (4294967296 blocks), max. tree
+++	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
+++	 * let's also add one more level for imbalance.
+++	 */
+++	depth = 5;
+++
+++	/* allocation of new data block(s) */
+++	needed = 2;
+++
+++	/*
+++	 * tree can be full, so it'd need to grow in depth:
+++	 * we need one credit to modify old root, credits for
+++	 * new root will be added in split accounting
+++	 */
+++	needed += 1;
+++	/*
+++	 * Index split can happen, we'd need:
+++	 *    allocate intermediate indexes (bitmap + group)
+++	 *  + change two blocks at each level, but root (already included)
+++	 */
+++	needed += (depth * 2) + (depth * 2);
+++
+++	/* any allocation modifies superblock */
+++	needed += 1;
+++
+++	return needed;
+++}
+++
+++/*
++  * How many index/leaf blocks need to change/allocate to modify nrblocks?
++  *
++  * if nrblocks are fit in a single extent (chunk flag is 1), then
++@@ -3890,3 +3939,14 @@
++ #endif
++ }
++ 
+++EXPORT_SYMBOL(ext4_ext_store_pblock);
+++EXPORT_SYMBOL(ext4_ext_search_right);
+++EXPORT_SYMBOL(ext4_ext_search_left);
+++EXPORT_SYMBOL(ext_pblock);
+++EXPORT_SYMBOL(ext4_ext_insert_extent);
+++EXPORT_SYMBOL(ext4_mb_new_blocks);
+++EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
+++EXPORT_SYMBOL(ext4_mark_inode_dirty);
+++EXPORT_SYMBOL(ext4_ext_walk_space);
+++EXPORT_SYMBOL(ext4_ext_find_extent);
+++EXPORT_SYMBOL(ext4_ext_drop_refs);
++Index: linux-source-2.6.32/fs/ext4/inode.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:10:15.329671358 +0200
+++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:10:23.333666208 +0200
++@@ -5062,6 +5062,7 @@
++ 	iget_failed(inode);
++ 	return ERR_PTR(ret);
++ }
+++EXPORT_SYMBOL(ext4_iget);
++ 
++ static int ext4_inode_blocks_set(handle_t *handle,
++ 				struct ext4_inode *raw_inode,
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:10:19.997669436 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:23.333666208 +0200
++@@ -4085,6 +4085,7 @@
++ 	if (ac)
++ 		kmem_cache_free(ext4_ac_cachep, ac);
++ }
+++EXPORT_SYMBOL(ext4_discard_preallocations);
++ 
++ /*
++  * finds all preallocated spaces and return blocks being freed to them
++@@ -4879,3 +4880,6 @@
++ 		kmem_cache_free(ext4_ac_cachep, ac);
++ 	return;
++ }
+++
+++EXPORT_SYMBOL(ext4_free_blocks);
+++
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:15.337667957 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:23.337667624 +0200
++@@ -127,6 +127,7 @@
++ 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
++ 		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
++ }
+++EXPORT_SYMBOL(ext4_itable_unused_count);
++ 
++ void ext4_block_bitmap_set(struct super_block *sb,
++ 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
++@@ -1101,10 +1102,12 @@
++ 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
++ 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
++ 	Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
+++	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
++ 	Opt_usrquota, Opt_grpquota, Opt_i_version,
++ 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
++ 	Opt_block_validity, Opt_noblock_validity,
++ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+++	Opt_mballoc,
++ 	Opt_discard, Opt_nodiscard,
++ };
++ 
++@@ -1156,6 +1159,9 @@
++ 	{Opt_noquota, "noquota"},
++ 	{Opt_quota, "quota"},
++ 	{Opt_usrquota, "usrquota"},
+++	{Opt_iopen, "iopen"},
+++	{Opt_noiopen, "noiopen"},
+++	{Opt_iopen_nopriv, "iopen_nopriv"},
++ 	{Opt_barrier, "barrier=%u"},
++ 	{Opt_barrier, "barrier"},
++ 	{Opt_nobarrier, "nobarrier"},
++@@ -1171,6 +1177,7 @@
++ 	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
++ 	{Opt_auto_da_alloc, "auto_da_alloc"},
++ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
+++	{Opt_mballoc, "mballoc"},
++ 	{Opt_discard, "discard"},
++ 	{Opt_nodiscard, "nodiscard"},
++ 	{Opt_err, NULL},
++@@ -1524,6 +1531,10 @@
++ 			else
++ 				clear_opt(sbi->s_mount_opt, BARRIER);
++ 			break;
+++		case Opt_iopen:
+++		case Opt_noiopen:
+++		case Opt_iopen_nopriv:
+++			break;
++ 		case Opt_ignore:
++ 			break;
++ 		case Opt_resize:
++@@ -1607,6 +1618,8 @@
++ 		case Opt_nodiscard:
++ 			clear_opt(sbi->s_mount_opt, DISCARD);
++ 			break;
+++		case Opt_mballoc:
+++			break;
++ 		default:
++ 			ext4_msg(sb, KERN_ERR,
++ 			       "Unrecognized mount option \"%s\" "
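
The new Opt_iopen/Opt_noiopen/Opt_iopen_nopriv and Opt_mballoc cases are
deliberate no-ops: Lustre passes these options when mounting the backing
ldiskfs filesystem, so parse_options() has to recognize and swallow them,
otherwise the mount fails with "Unrecognized mount option". A rough
userspace illustration of the accept-and-ignore pattern (names invented
for the example):

#include <stdio.h>
#include <string.h>

/* options the filesystem must swallow without acting on them */
static const char *ignored[] = { "iopen", "noiopen", "iopen_nopriv",
				 "mballoc" };

static int parse_option(const char *opt)
{
	size_t i;

	for (i = 0; i < sizeof(ignored) / sizeof(ignored[0]); i++)
		if (strcmp(opt, ignored[i]) == 0)
			return 0;	/* recognized, intentionally a no-op */
	fprintf(stderr, "Unrecognized mount option \"%s\"\n", opt);
	return -1;
}

int main(void)
{
	parse_option("mballoc");	/* accepted silently */
	parse_option("bogus");		/* rejected, like the default: case */
	return 0;
}
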
+diff --git a/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..a7fca5f
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch
+@@ -0,0 +1,575 @@
++Prevent an ext4 filesystem from being mounted multiple times.
++A sequence number is stored on disk and is periodically updated (every 5
++seconds by default) by a mounted filesystem.
++At mount time, we now wait for s_mmp_update_interval seconds to make sure
++that the MMP sequence does not change.
++In case of failure, the nodename, bdevname and the time at which the MMP
++block was last updated are displayed.
++Move all mmp code to a dedicated file (mmp.c).
++
++Signed-off-by: Andreas Dilger <adilger <at> whamcloud.com>
++Signed-off-by: Johann Lombardi <johann <at> whamcloud.com>
++---
++ fs/ext4/Makefile |    3 +-
++ fs/ext4/ext4.h   |   76 ++++++++++++-
++ fs/ext4/mmp.c    |  351 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
++ fs/ext4/super.c  |   18 +++-
++ 4 files changed, 444 insertions(+), 4 deletions(-)
++ create mode 100644 fs/ext4/mmp.c
++
++Index: linux-source-2.6.32/fs/ext4/Makefile
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:08:33.661737979 +0200
+++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:09:59.685666701 +0200
++@@ -6,7 +6,8 @@
++ 
++ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
++ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
++-		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
+++		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
+++		mmp.o
++ 
++ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
++ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:34.581668116 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:59.689673296 +0200
++@@ -962,7 +962,7 @@
++ 	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
++ 	__le32	s_flags;		/* Miscellaneous flags */
++ 	__le16  s_raid_stride;		/* RAID stride */
++-	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+++	__le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
++ 	__le64  s_mmp_block;            /* Block for multi-mount protection */
++ 	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
++ 	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
++@@ -1109,6 +1109,9 @@
++ 
++ 	/* workqueue for dio unwritten */
++ 	struct workqueue_struct *dio_unwritten_wq;
+++
+++	/* Kernel thread for multiple mount protection */
+++	struct task_struct *s_mmp_tsk;
++ };
++ 
++ static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
++@@ -1248,7 +1251,8 @@
++ 					 EXT4_FEATURE_INCOMPAT_META_BG| \
++ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
++ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
++-					 EXT4_FEATURE_INCOMPAT_FLEX_BG)
+++					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+++					 EXT4_FEATURE_INCOMPAT_MMP)
++ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
++ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
++ 					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
++@@ -1430,6 +1434,67 @@
++ extern struct proc_dir_entry *ext4_proc_root;
++ 
++ /*
+++ * This structure will be used for multiple mount protection. It will be
+++ * written into the block number saved in the s_mmp_block field in the
+++ * superblock. Programs that check MMP should assume that if
+++ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
+++ * to use the filesystem, regardless of how old the timestamp is.
+++ */
+++#define EXT4_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
+++#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
+++#define EXT4_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
+++#define EXT4_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
+++
+++struct mmp_struct {
+++       __le32  mmp_magic;              /* Magic number for MMP */
+++       __le32  mmp_seq;                /* Sequence no. updated periodically */
+++
+++       /*
+++        * mmp_time, mmp_nodename & mmp_bdevname are only used for information
+++        * purposes and do not affect the correctness of the algorithm
+++        */
+++       __le64  mmp_time;               /* Time last updated */
+++       char    mmp_nodename[64];       /* Node which last updated MMP block */
+++       char    mmp_bdevname[32];       /* Bdev which last updated MMP block */
+++
+++       /*
+++        * mmp_check_interval is used to verify if the MMP block has been
+++        * updated on the block device. The value is updated based on the
+++        * maximum time to write the MMP block during an update cycle.
+++        */
+++       __le16  mmp_check_interval;
+++
+++       __le16  mmp_pad1;
+++       __le32  mmp_pad2[227];
+++};
+++
+++/* arguments passed to the mmp thread */
+++struct mmpd_data {
+++       struct buffer_head *bh; /* bh from initial read_mmp_block() */
+++       struct super_block *sb;  /* super block of the fs */
+++};
+++
+++/*
+++ * Check interval multiplier
+++ * The MMP block is written every update interval and initially checked every
+++ * update interval x the multiplier (the value is then adapted based on the
+++ * write latency). The reason is that writes can be delayed under load and we
+++ * don't want readers to incorrectly assume that the filesystem is no longer
+++ * in use.
+++ */
+++#define EXT4_MMP_CHECK_MULT            2UL
+++
+++/*
+++ * Minimum interval for MMP checking in seconds.
+++ */
+++#define EXT4_MMP_MIN_CHECK_INTERVAL    5UL
+++
+++/*
+++ * Maximum interval for MMP checking in seconds.
+++ */
+++#define EXT4_MMP_MAX_CHECK_INTERVAL    300UL
+++
+++/*
++  * Function prototypes
++  */
++ 
++@@ -1594,6 +1659,10 @@
++ 	__attribute__ ((format (printf, 3, 4)));
++ extern void ext4_msg(struct super_block *, const char *, const char *, ...)
++ 	__attribute__ ((format (printf, 3, 4)));
+++extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
+++			   const char *, const char *);
+++#define dump_mmp_msg(sb, mmp, msg)     __dump_mmp_msg(sb, mmp, __func__, \
+++                                                      msg)
++ extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
++ 				const char *, const char *, ...)
++ 	__attribute__ ((format (printf, 4, 5)));
++@@ -1876,6 +1945,8 @@
++ 			     __u64 start_orig, __u64 start_donor,
++ 			     __u64 len, __u64 *moved_len);
++ 
+++/* mmp.c */
+++extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
++ 
++ /*
++  * Add new method to test wether block and inode bitmaps are properly
++Index: linux-source-2.6.32/fs/ext4/mmp.c
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/fs/ext4/mmp.c	2012-06-28 12:09:59.689673296 +0200
++@@ -0,0 +1,351 @@
+++#include <linux/fs.h>
+++#include <linux/random.h>
+++#include <linux/buffer_head.h>
+++#include <linux/utsname.h>
+++#include <linux/kthread.h>
+++
+++#include "ext4.h"
+++
+++/*
+++ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+++ * faster.
+++ */
+++static int write_mmp_block(struct buffer_head *bh)
+++{
+++       mark_buffer_dirty(bh);
+++       lock_buffer(bh);
+++       bh->b_end_io = end_buffer_write_sync;
+++       get_bh(bh);
+++       submit_bh(WRITE_SYNC, bh);
+++       wait_on_buffer(bh);
+++       if (unlikely(!buffer_uptodate(bh)))
+++               return 1;
+++
+++       return 0;
+++}
+++
+++/*
+++ * Read the MMP block. It _must_ be read from disk and hence we clear the
+++ * uptodate flag on the buffer.
+++ */
+++static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+++                         ext4_fsblk_t mmp_block)
+++{
+++       struct mmp_struct *mmp;
+++
+++       if (*bh)
+++               clear_buffer_uptodate(*bh);
+++
+++       /* This would be sb_bread(sb, mmp_block), except we need to be sure
+++        * that the MD RAID device cache has been bypassed, and that the read
+++        * is not blocked in the elevator. */
+++       if (!*bh)
+++               *bh = sb_getblk(sb, mmp_block);
+++       if (*bh) {
+++               get_bh(*bh);
+++               lock_buffer(*bh);
+++               (*bh)->b_end_io = end_buffer_read_sync;
+++               submit_bh(READ_SYNC, *bh);
+++               wait_on_buffer(*bh);
+++               if (!buffer_uptodate(*bh)) {
+++                       brelse(*bh);
+++                       *bh = NULL;
+++               }
+++       }
+++       if (!*bh) {
+++               ext4_warning(sb, "Error while reading MMP block %llu",
+++                            mmp_block);
+++               return -EIO;
+++       }
+++
+++       mmp = (struct mmp_struct *)((*bh)->b_data);
+++       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+++               return -EINVAL;
+++
+++       return 0;
+++}
+++
+++/*
+++ * Dump as much information as possible to help the admin.
+++ */
+++void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+++                   const char *function, const char *msg)
+++{
+++       __ext4_warning(sb, function, msg);
+++       __ext4_warning(sb, function,
+++                      "MMP failure info: last update time: %llu, last update "
+++                      "node: %s, last update device: %s\n",
+++                      (long long unsigned int) le64_to_cpu(mmp->mmp_time),
+++                      mmp->mmp_nodename, mmp->mmp_bdevname);
+++}
+++
+++/*
+++ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
+++ */
+++static int kmmpd(void *data)
+++{
+++       struct super_block *sb = ((struct mmpd_data *) data)->sb;
+++       struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
+++       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+++       struct mmp_struct *mmp;
+++       ext4_fsblk_t mmp_block;
+++       u32 seq = 0;
+++       unsigned long failed_writes = 0;
+++       int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
+++       unsigned mmp_check_interval;
+++       unsigned long last_update_time;
+++       unsigned long diff;
+++       int retval;
+++
+++       mmp_block = le64_to_cpu(es->s_mmp_block);
+++       mmp = (struct mmp_struct *)(bh->b_data);
+++       mmp->mmp_time = cpu_to_le64(get_seconds());
+++       /*
+++        * Start with the higher mmp_check_interval and reduce it if
+++        * the MMP block is being updated on time.
+++        */
+++       mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
+++                                EXT4_MMP_MIN_CHECK_INTERVAL);
+++       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+++       bdevname(bh->b_bdev, mmp->mmp_bdevname);
+++
+++       memcpy(mmp->mmp_nodename, init_utsname()->nodename,
+++              sizeof(mmp->mmp_nodename));
+++
+++       while (!kthread_should_stop()) {
+++               if (++seq > EXT4_MMP_SEQ_MAX)
+++                       seq = 1;
+++
+++               mmp->mmp_seq = cpu_to_le32(seq);
+++               mmp->mmp_time = cpu_to_le64(get_seconds());
+++               last_update_time = jiffies;
+++
+++               retval = write_mmp_block(bh);
+++               /*
+++                * Don't spew too many error messages. Print one every
+++                * (s_mmp_update_interval * 60) seconds.
+++                */
+++               if (retval) {
+++                       if ((failed_writes % 60) == 0)
+++                               ext4_error(sb, "Error writing to MMP block");
+++                       failed_writes++;
+++               }
+++
+++               if (!(le32_to_cpu(es->s_feature_incompat) &
+++                   EXT4_FEATURE_INCOMPAT_MMP)) {
+++                       ext4_warning(sb, "kmmpd being stopped since MMP feature"
+++                                    " has been disabled.");
+++                       EXT4_SB(sb)->s_mmp_tsk = NULL;
+++                       goto failed;
+++               }
+++
+++               if (sb->s_flags & MS_RDONLY) {
+++                       ext4_warning(sb, "kmmpd being stopped since filesystem "
+++                                    "has been remounted as readonly.");
+++                       EXT4_SB(sb)->s_mmp_tsk = NULL;
+++                       goto failed;
+++               }
+++
+++               diff = jiffies - last_update_time;
+++               if (diff < mmp_update_interval * HZ)
+++                       schedule_timeout_interruptible(mmp_update_interval *
+++                                                      HZ - diff);
+++
+++               /*
+++                * We need to make sure that more than mmp_check_interval
+++                * seconds have not passed since writing. If that has happened
+++                * we need to check if the MMP block is as we left it.
+++                */
+++               diff = jiffies - last_update_time;
+++               if (diff > mmp_check_interval * HZ) {
+++                       struct buffer_head *bh_check = NULL;
+++                       struct mmp_struct *mmp_check;
+++
+++                       retval = read_mmp_block(sb, &bh_check, mmp_block);
+++                       if (retval) {
+++                               ext4_error(sb, "error reading MMP data: %d",
+++                                          retval);
+++
+++                               EXT4_SB(sb)->s_mmp_tsk = NULL;
+++                               goto failed;
+++                       }
+++
+++                       mmp_check = (struct mmp_struct *)(bh_check->b_data);
+++                       if (mmp->mmp_seq != mmp_check->mmp_seq ||
+++                           memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
+++                                  sizeof(mmp->mmp_nodename))) {
+++                               dump_mmp_msg(sb, mmp_check,
+++                                            "Error while updating MMP info. "
+++                                            "The filesystem seems to have been"
+++                                            " multiply mounted.");
+++                               ext4_error(sb, "abort");
+++                               goto failed;
+++                       }
+++                       put_bh(bh_check);
+++               }
+++
+++                /*
+++                * Adjust the mmp_check_interval depending on how much time
+++                * it took for the MMP block to be written.
+++                */
+++               mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
+++                                            EXT4_MMP_MAX_CHECK_INTERVAL),
+++                                        EXT4_MMP_MIN_CHECK_INTERVAL);
+++               mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+++       }
+++
+++       /*
+++        * Unmount seems to be clean.
+++        */
+++       mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
+++       mmp->mmp_time = cpu_to_le64(get_seconds());
+++
+++       retval = write_mmp_block(bh);
+++
+++failed:
+++       kfree(data);
+++       brelse(bh);
+++       return retval;
+++}
+++
+++/*
+++ * Get a random new sequence number but make sure it is not greater than
+++ * EXT4_MMP_SEQ_MAX.
+++ */
+++static unsigned int mmp_new_seq(void)
+++{
+++       u32 new_seq;
+++
+++       do {
+++               get_random_bytes(&new_seq, sizeof(u32));
+++       } while (new_seq > EXT4_MMP_SEQ_MAX);
+++
+++       return new_seq;
+++}
+++
+++/*
+++ * Protect the filesystem from being mounted more than once.
+++ */
+++int ext4_multi_mount_protect(struct super_block *sb,
+++                                   ext4_fsblk_t mmp_block)
+++{
+++       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+++       struct buffer_head *bh = NULL;
+++       struct mmp_struct *mmp = NULL;
+++       struct mmpd_data *mmpd_data;
+++       u32 seq;
+++       unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
+++       unsigned int wait_time = 0;
+++       int retval;
+++
+++       if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
+++           mmp_block >= ext4_blocks_count(es)) {
+++               ext4_warning(sb, "Invalid MMP block in superblock");
+++               goto failed;
+++       }
+++
+++       retval = read_mmp_block(sb, &bh, mmp_block);
+++       if (retval)
+++               goto failed;
+++
+++       mmp = (struct mmp_struct *)(bh->b_data);
+++
+++       if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
+++               mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
+++
+++       /*
+++        * If check_interval in MMP block is larger, use that instead of
+++        * update_interval from the superblock.
+++        */
+++       if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
+++               mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);
+++
+++       seq = le32_to_cpu(mmp->mmp_seq);
+++       if (seq == EXT4_MMP_SEQ_CLEAN)
+++               goto skip;
+++
+++       if (seq == EXT4_MMP_SEQ_FSCK) {
+++               dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
+++               goto failed;
+++       }
+++
+++       wait_time = min(mmp_check_interval * 2 + 1,
+++                       mmp_check_interval + 60);
+++
+++       /* Print MMP interval if more than 20 secs. */
+++       if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
+++               ext4_warning(sb, "MMP interval %u higher than expected, please"
+++                            " wait.\n", wait_time * 2);
+++
+++       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+++               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+++               goto failed;
+++       }
+++
+++       retval = read_mmp_block(sb, &bh, mmp_block);
+++       if (retval)
+++               goto failed;
+++       mmp = (struct mmp_struct *)(bh->b_data);
+++       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+++               dump_mmp_msg(sb, mmp,
+++                            "Device is already active on another node.");
+++               goto failed;
+++       }
+++
+++skip:
+++       /*
+++        * write a new random sequence number.
+++        */
+++       mmp->mmp_seq = cpu_to_le32(seq = mmp_new_seq());
+++
+++       retval = write_mmp_block(bh);
+++       if (retval)
+++               goto failed;
+++
+++       /*
+++        * wait for MMP interval and check mmp_seq.
+++        */
+++       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+++               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+++               goto failed;
+++       }
+++
+++       retval = read_mmp_block(sb, &bh, mmp_block);
+++       if (retval)
+++               goto failed;
+++       mmp = (struct mmp_struct *)(bh->b_data);
+++       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+++               dump_mmp_msg(sb, mmp,
+++                            "Device is already active on another node.");
+++               goto failed;
+++       }
+++
+++       mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
+++       if (!mmpd_data) {
+++               ext4_warning(sb, "not enough memory for mmpd_data");
+++               goto failed;
+++       }
+++       mmpd_data->sb = sb;
+++       mmpd_data->bh = bh;
+++
+++       /*
+++        * Start a kernel thread to update the MMP block periodically.
+++        */
+++       EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+++                                            bdevname(bh->b_bdev,
+++                                                     mmp->mmp_bdevname));
+++       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+++               EXT4_SB(sb)->s_mmp_tsk = NULL;
+++               kfree(mmpd_data);
+++               ext4_warning(sb, "Unable to create kmmpd thread for %s.",
+++                            sb->s_id);
+++               goto failed;
+++       }
+++
+++       return 0;
+++
+++failed:
+++       brelse(bh);
+++       return 1;
+++}
+++
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:23.393677834 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:59.693721980 +0200
++@@ -40,6 +40,8 @@
++ #include <linux/log2.h>
++ #include <linux/crc16.h>
++ #include <asm/uaccess.h>
+++#include <linux/kthread.h>
+++#include <linux/utsname.h>
++ 
++ #include "ext4.h"
++ #include "ext4_jbd2.h"
++@@ -665,6 +667,8 @@
++ 		invalidate_bdev(sbi->journal_bdev);
++ 		ext4_blkdev_remove(sbi);
++ 	}
+++	if (sbi->s_mmp_tsk)
+++		kthread_stop(sbi->s_mmp_tsk);
++ 	sb->s_fs_info = NULL;
++ 	/*
++ 	 * Now that we are completely done shutting down the
++@@ -2731,6 +2735,10 @@
++ 	needs_recovery = (es->s_last_orphan != 0 ||
++ 			  EXT4_HAS_INCOMPAT_FEATURE(sb,
++ 				    EXT4_FEATURE_INCOMPAT_RECOVER));
+++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
+++	    !(sb->s_flags & MS_RDONLY))
+++		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
+++			goto failed_mount3;
++ 
++ 	/*
++ 	 * The first inode we look at is the journal inode.  Don't try
++@@ -2981,6 +2989,8 @@
++ 		else
++ 			kfree(sbi->s_flex_groups);
++ 	}
+++	if (sbi->s_mmp_tsk)
+++		kthread_stop(sbi->s_mmp_tsk);
++ failed_mount2:
++ 	for (i = 0; i < db_count; i++)
++ 		brelse(sbi->s_group_desc[i]);
++@@ -3489,7 +3499,7 @@
++ 	struct ext4_mount_options old_opts;
++ 	ext4_group_t g;
++ 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
++-	int err;
+++	int err = 0;
++ #ifdef CONFIG_QUOTA
++ 	int i;
++ #endif
++@@ -3611,6 +3621,13 @@
++ 				goto restore_opts;
++ 			if (!ext4_setup_super(sb, es, 0))
++ 				sb->s_flags &= ~MS_RDONLY;
+++			if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+++						    EXT4_FEATURE_INCOMPAT_MMP))
+++				if (ext4_multi_mount_protect(sb,
+++						le64_to_cpu(es->s_mmp_block))) {
+++					err = -EROFS;
+++					goto restore_opts;
+++				}
++ 		}
++ 	}
++ 	ext4_setup_system_zone(sb);
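
Sketching the handshake implemented by ext4_multi_mount_protect() above:
read the MMP sequence; unless it is the clean-unmount marker, wait one
full check interval and fail if the sequence advanced (someone else is
updating it); then claim the block with a fresh random sequence, wait
again, and fail if anyone overwrote it. A compressed userspace model of
that protocol, where read_mmp_seq(), write_mmp_seq() and
wait_check_interval() stand in for the buffer-head I/O and the timeouts,
and the fsck case is left out:

#include <stdio.h>
#include <stdlib.h>

#define MMP_SEQ_CLEAN 0xFF4D4D50U	/* clean unmount marker */
#define MMP_SEQ_MAX   0xE24D4D4FU	/* largest valid live sequence */

/* stand-in for the on-disk MMP block; kmmpd on another node would be
 * the competing writer */
static unsigned int disk_seq = MMP_SEQ_CLEAN;

static unsigned int read_mmp_seq(void) { return disk_seq; }
static void write_mmp_seq(unsigned int s) { disk_seq = s; }
static void wait_check_interval(void) { /* sleep(wait_time) in the patch */ }

static int multi_mount_protect(void)
{
	unsigned int seq = read_mmp_seq();

	if (seq != MMP_SEQ_CLEAN) {
		/* possibly mounted elsewhere: wait one full check
		 * interval and fail if the sequence advanced */
		wait_check_interval();
		if (read_mmp_seq() != seq)
			return -1;
	}
	/* claim the device with a fresh random sequence ... */
	seq = ((unsigned int)rand() % MMP_SEQ_MAX) + 1;
	write_mmp_seq(seq);
	/* ... and make sure nobody overwrote it while we slept */
	wait_check_interval();
	if (read_mmp_seq() != seq)
		return -1;
	return 0;	/* safe: the real code now starts kmmpd */
}

int main(void)
{
	printf("mount %s\n", multi_mount_protect() ? "refused" : "allowed");
	return 0;
}

Only after the second re-read confirms ownership does the real code start
kmmpd, which keeps bumping the sequence every s_mmp_update_interval
seconds so other nodes see the device as busy.
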
+diff --git a/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..07d4721
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch
+@@ -0,0 +1,16 @@
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:08:58.217663806 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:09:27.373665875 +0200
++@@ -1742,9 +1742,8 @@
++  */
++ static void ext4_dec_count(handle_t *handle, struct inode *inode)
++ {
++-	drop_nlink(inode);
++-	if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
++-		inc_nlink(inode);
+++	if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
+++		drop_nlink(inode);
++ }
++ 
++ 
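
Why this one-liner matters: Lustre directories can exceed EXT4_LINK_MAX,
and under the DIR_NLINK convention such a directory keeps i_nlink pinned
at 1. The old code dropped the count and then restored it for directories
that hit zero; the patched version simply never decrements a directory at
or below two links. A toy model of both variants (userspace, illustrative
only):

#include <stdio.h>

/* old behaviour: drop, then undo the drop for directories at zero */
static int dec_old(int isdir, int nlink)
{
	nlink--;			/* drop_nlink() */
	if (isdir && nlink == 0)
		nlink++;		/* inc_nlink() */
	return nlink;
}

/* patched behaviour: never touch a directory at or below 2 links */
static int dec_new(int isdir, int nlink)
{
	if (!isdir || nlink > 2)
		nlink--;		/* drop_nlink() */
	return nlink;
}

int main(void)
{
	/* overflowed directory, i_nlink pinned at 1: both end up at 1,
	 * but the old variant bounces the count through zero */
	printf("nlink=1 dir: old=%d new=%d\n", dec_old(1, 1), dec_new(1, 1));
	/* directory at its base count: old drops to 1, new leaves it alone */
	printf("nlink=2 dir: old=%d new=%d\n", dec_old(1, 2), dec_new(1, 2));
	/* regular file: both variants simply drop */
	printf("nlink=2 file: old=%d new=%d\n", dec_old(0, 2), dec_new(0, 2));
	return 0;
}
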
+diff --git a/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..fa66ccd
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch
+@@ -0,0 +1,27 @@
++Index: linux-source-2.6.32/fs/ext4/xattr.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:11:24.117664822 +0200
+++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:31.053665768 +0200
++@@ -1347,7 +1347,7 @@
++ 	}
++ 	if (!error) {
++ 		ext4_xattr_update_super_block(handle, inode->i_sb);
++-		if (!(flags & XATTR_NO_CTIME))
+++		if (!IS_NOCMTIME(inode) && !(flags & XATTR_NO_CTIME))
++ 			inode->i_ctime = ext4_current_time(inode);
++ 		if (!value)
++ 			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:11:16.365664904 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:31.057668333 +0200
++@@ -1444,7 +1444,8 @@
++ 	 * happen is that the times are slightly out of date
++ 	 * and/or different from the directory change time.
++ 	 */
++-	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+++	if (!IS_NOCMTIME(dir))
+++		dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
++ 	ext4_update_dx_flag(dir);
++ 	dir->i_version++;
++ 	ext4_mark_inode_dirty(handle, dir);
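
Both hunks apply the same guard: skip the ctime/mtime update when the
inode is flagged IS_NOCMTIME, because on a Lustre server the MDS dictates
the timestamps and the local filesystem must not clobber them. A minimal
sketch of the pattern; the S_NOCMTIME bit value and the toy_inode struct
are invented for the illustration:

#include <stdio.h>
#include <time.h>

#define S_NOCMTIME 0x1	/* hypothetical flag bit, for illustration only */

struct toy_inode {
	unsigned int flags;
	time_t ctime;
};

/* mirrors the guarded update: only touch ctime when the filesystem,
 * not the Lustre server, owns the inode times */
static void update_ctime(struct toy_inode *inode)
{
	if (!(inode->flags & S_NOCMTIME))	/* !IS_NOCMTIME(inode) */
		inode->ctime = time(NULL);
}

int main(void)
{
	struct toy_inode server_owned = { S_NOCMTIME, 0 };
	struct toy_inode local = { 0, 0 };

	update_ctime(&server_owned);	/* ctime stays 0 */
	update_ctime(&local);		/* ctime set to "now" */
	printf("server-owned: %ld, local: %ld\n",
	       (long)server_owned.ctime, (long)local.ctime);
	return 0;
}
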
+diff --git a/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..7d1461a
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch
+@@ -0,0 +1,68 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:39.449662782 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:42.265665313 +0200
++@@ -1684,6 +1684,9 @@
++ #define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
++ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++ 			       struct inode *inode);
+++extern struct buffer_head *ext4_append(handle_t *handle,
+++				       struct inode *inode,
+++				       ext4_lblk_t *block, int *err);
++ 
++ /* resize.c */
++ extern int ext4_group_add(struct super_block *sb,
++Index: linux-source-2.6.32/fs/ext4/hash.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/hash.c	2012-06-28 12:08:21.745667214 +0200
+++++ linux-source-2.6.32/fs/ext4/hash.c	2012-06-28 12:10:42.265665313 +0200
++@@ -9,6 +9,7 @@
++  * License.
++  */
++ 
+++#include <linux/module.h>
++ #include <linux/fs.h>
++ #include <linux/jbd2.h>
++ #include <linux/cryptohash.h>
++@@ -206,3 +207,4 @@
++ 	hinfo->minor_hash = minor_hash;
++ 	return 0;
++ }
+++EXPORT_SYMBOL(ext4fs_dirhash);
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:39.453664850 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:42.283981062 +0200
++@@ -49,9 +49,9 @@
++ #define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
++ #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
++ 
++-static struct buffer_head *ext4_append(handle_t *handle,
++-					struct inode *inode,
++-					ext4_lblk_t *block, int *err)
+++struct buffer_head *ext4_append(handle_t *handle,
+++				struct inode *inode,
+++				ext4_lblk_t *block, int *err)
++ {
++ 	struct buffer_head *bh;
++ 	struct ext4_inode_info *ei = EXT4_I(inode);
++@@ -76,6 +76,7 @@
++ 	up(&ei->i_append_sem);
++ 	return bh;
++ }
+++EXPORT_SYMBOL(ext4_append);
++ 
++ #ifndef assert
++ #define assert(test) J_ASSERT(test)
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:36.377665203 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:42.285668861 +0200
++@@ -412,6 +412,7 @@
++ 
++ 	ext4_handle_error(sb);
++ }
+++EXPORT_SYMBOL(__ext4_std_error);
++ 
++ /*
++  * ext4_abort is a much stronger failure handler than ext4_error.  The
+diff --git a/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..3eabdc3
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch
+@@ -0,0 +1,229 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:36.373664428 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:39.449662782 +0200
++@@ -1671,6 +1671,19 @@
++ extern int ext4_orphan_del(handle_t *, struct inode *);
++ extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
++ 				__u32 start_minor_hash, __u32 *next_hash);
+++extern struct inode *ext4_create_inode(handle_t *handle,
+++				       struct inode * dir, int mode);
+++extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+++			  struct inode *inode);
+++extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
+++			     struct ext4_dir_entry_2 * de_del,
+++			     struct buffer_head * bh);
+++extern struct buffer_head * ext4_find_entry(struct inode *dir,
+++					    const struct qstr *d_name,
+++					    struct ext4_dir_entry_2 ** res_dir);
+++#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
+++extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+++			       struct inode *inode);
++ 
++ /* resize.c */
++ extern int ext4_group_add(struct super_block *sb,
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:36.377665203 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:39.453664850 +0200
++@@ -24,6 +24,7 @@
++  *	Theodore Ts'o, 2002
++  */
++ 
+++#include <linux/module.h>
++ #include <linux/fs.h>
++ #include <linux/pagemap.h>
++ #include <linux/jbd2.h>
++@@ -901,9 +902,9 @@
++  * The returned buffer_head has ->b_count elevated.  The caller is expected
++  * to brelse() it when appropriate.
++  */
++-static struct buffer_head * ext4_find_entry (struct inode *dir,
++-					const struct qstr *d_name,
++-					struct ext4_dir_entry_2 ** res_dir)
+++struct buffer_head * ext4_find_entry(struct inode *dir,
+++				      const struct qstr *d_name,
+++				      struct ext4_dir_entry_2 ** res_dir)
++ {
++ 	struct super_block *sb;
++ 	struct buffer_head *bh_use[NAMEI_RA_SIZE];
++@@ -1010,6 +1011,7 @@
++ 		brelse(bh_use[ra_ptr]);
++ 	return ret;
++ }
+++EXPORT_SYMBOL(ext4_find_entry);
++ 
++ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
++ 		       struct ext4_dir_entry_2 **res_dir, int *err)
++@@ -1533,8 +1535,8 @@
++  * may not sleep between calling this and putting something into
++  * the entry, as someone else might have used it while you slept.
++  */
++-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++-			  struct inode *inode)
+++int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+++		   struct inode *inode)
++ {
++ 	struct inode *dir = dentry->d_parent->d_inode;
++ 	struct buffer_head *bh;
++@@ -1585,6 +1587,7 @@
++ 		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
++ 	return retval;
++ }
+++EXPORT_SYMBOL(ext4_add_entry);
++ 
++ /*
++  * Returns 0 for success, or a negative error value
++@@ -1725,10 +1728,10 @@
++  * ext4_delete_entry deletes a directory entry by merging it with the
++  * previous entry
++  */
++-static int ext4_delete_entry(handle_t *handle,
++-			     struct inode *dir,
++-			     struct ext4_dir_entry_2 *de_del,
++-			     struct buffer_head *bh)
+++int ext4_delete_entry(handle_t *handle,
+++		      struct inode *dir,
+++		      struct ext4_dir_entry_2 *de_del,
+++		      struct buffer_head *bh)
++ {
++ 	struct ext4_dir_entry_2 *de, *pde;
++ 	unsigned int blocksize = dir->i_sb->s_blocksize;
++@@ -1763,7 +1766,7 @@
++ 	}
++ 	return -ENOENT;
++ }
++-
+++EXPORT_SYMBOL(ext4_delete_entry);
++ /*
++  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
++  * since this indicates that nlinks count was previously 1.
++@@ -1827,6 +1830,27 @@
++ 	return inum;
++ }
++ 
+++struct inode * ext4_create_inode(handle_t *handle, struct inode * dir, int mode)
+++{
+++	struct inode *inode;
+++
+++	inode = ext4_new_inode(handle, dir, mode, 0, EXT4_SB(dir->i_sb)->s_inode_goal);
+++	if (!IS_ERR(inode)) {
+++		if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
+++#ifdef CONFIG_LDISKFS_FS_XATTR
+++			inode->i_op = &ext4_special_inode_operations;
+++#endif
+++		} else {
+++			inode->i_op = &ext4_file_inode_operations;
+++			inode->i_fop = &ext4_file_operations;
+++			ext4_set_aops(inode);
+++		}
+++		unlock_new_inode(inode);
+++	}
+++	return inode;
+++}
+++EXPORT_SYMBOL(ext4_create_inode);
+++
++ /*
++  * By the time this is called, we already have created
++  * the directory cache entry for the new file, but it
++@@ -1903,40 +1927,33 @@
++ 	return err;
++ }
++ 
++-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+++/* Initialize @inode as a subdirectory of @dir, and add the
+++ * "." and ".." entries into the first directory block. */
+++int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
+++			struct inode *inode)
++ {
++-	handle_t *handle;
++-	struct inode *inode;
++-	struct buffer_head *dir_block;
++-	struct ext4_dir_entry_2 *de;
+++	struct buffer_head * dir_block;
+++	struct ext4_dir_entry_2 * de;
++ 	unsigned int blocksize = dir->i_sb->s_blocksize;
++-	int err, retries = 0;
++-
++-	if (EXT4_DIR_LINK_MAX(dir))
++-		return -EMLINK;
+++	int err = 0;
++ 
++-retry:
++-	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
++-					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
++-					EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
++ 	if (IS_ERR(handle))
++ 		return PTR_ERR(handle);
++ 
++ 	if (IS_DIRSYNC(dir))
++ 		ext4_handle_sync(handle);
++ 
++-	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
++-			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
++-	err = PTR_ERR(inode);
++-	if (IS_ERR(inode))
++-		goto out_stop;
++ 
++ 	inode->i_op = &ext4_dir_inode_operations;
++ 	inode->i_fop = &ext4_dir_operations;
++ 	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
++ 	dir_block = ext4_bread(handle, inode, 0, 1, &err);
++-	if (!dir_block)
++-		goto out_clear_inode;
+++	if (!dir_block) {
+++		clear_nlink(inode);
+++		ext4_mark_inode_dirty(handle, inode);
+++		iput (inode);
+++		goto get_out;
+++	}
++ 	BUFFER_TRACE(dir_block, "get_write_access");
++ 	ext4_journal_get_write_access(handle, dir_block);
++ 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
++@@ -1958,9 +1975,45 @@
++ 	ext4_handle_dirty_metadata(handle, dir, dir_block);
++ 	brelse(dir_block);
++ 	ext4_mark_inode_dirty(handle, inode);
+++get_out:
+++	return err;
+++}
+++EXPORT_SYMBOL(ext4_add_dot_dotdot);
+++
+++
+++static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+++{
+++	handle_t *handle;
+++	struct inode *inode;
+++	int err, retries = 0;
+++
+++	if (EXT4_DIR_LINK_MAX(dir))
+++		return -EMLINK;
+++
+++retry:
+++	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+++					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+++					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
+++	if (IS_ERR(handle))
+++		return PTR_ERR(handle);
+++
+++	if (IS_DIRSYNC(dir))
+++		handle->h_sync = 1;
+++
+++	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
+++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+++	err = PTR_ERR(inode);
+++	if (IS_ERR(inode))
+++		goto out_stop;
+++
+++	err = ext4_add_dot_dotdot(handle, dir, inode);
+++	if (err) {
+++		unlock_new_inode(inode);
+++		goto out_stop;
+++	}
+++
++ 	err = ext4_add_entry(handle, dentry, inode);
++ 	if (err) {
++-out_clear_inode:
++ 		clear_nlink(inode);
++ 		unlock_new_inode(inode);
++ 		ext4_mark_inode_dirty(handle, inode);
+diff --git a/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..f97493a
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch
+@@ -0,0 +1,62 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:30.009672059 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:36.373664428 +0200
++@@ -16,6 +16,7 @@
++ #ifndef _EXT4_H
++ #define _EXT4_H
++ 
+++#include <linux/dynlocks.h>
++ #include <linux/types.h>
++ #include <linux/blkdev.h>
++ #include <linux/magic.h>
++@@ -700,6 +701,10 @@
++ 	__u32	i_dtime;
++ 	ext4_fsblk_t	i_file_acl;
++ 
+++	/* following fields for parallel directory operations -bzzz */
+++	struct dynlock   i_htree_lock;
+++	struct semaphore i_append_sem;
+++
++ 	/*
++ 	 * i_block_group is the number of the block group which contains
++ 	 * this file's inode.  Constant across the lifetime of the inode,
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:30.005662279 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:36.377665203 +0200
++@@ -53,6 +53,11 @@
++ 					ext4_lblk_t *block, int *err)
++ {
++ 	struct buffer_head *bh;
+++	struct ext4_inode_info *ei = EXT4_I(inode);
+++
+++	/* with parallel dir operations all appends
+++	* have to be serialized -bzzz */
+++	down(&ei->i_append_sem);
++ 
++ 	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
++ 
++@@ -65,7 +70,9 @@
++ 			brelse(bh);
++ 			bh = NULL;
++ 		}
+++		ei->i_disksize = inode->i_size;
++ 	}
+++	up(&ei->i_append_sem);
++ 	return bh;
++ }
++ 
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:32.945663837 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:36.377665203 +0200
++@@ -702,6 +702,8 @@
++ 
++ 	ei->vfs_inode.i_version = 1;
++ 	ei->vfs_inode.i_data.writeback_index = 0;
+++	dynlock_init(&ei->i_htree_lock);
+++	sema_init(&ei->i_append_sem, 1);
++ 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
++ 	INIT_LIST_HEAD(&ei->i_prealloc_list);
++ 	spin_lock_init(&ei->i_prealloc_lock);
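
The parallel-directory fix above reduces to one locking rule: every append of
a new block to a directory has to be serialized through the per-inode
i_append_sem, while htree lookups are protected separately by the dynlock.
A minimal sketch of the append serialization, using the 2.6.32-era semaphore
API (the struct and function names are illustrative, not from the patch):

    #include <linux/semaphore.h>

    /* illustrative per-directory state; the patch embeds these fields
     * directly in struct ext4_inode_info */
    struct pdir_state {
            struct semaphore append_sem;    /* one block append at a time */
    };

    static void pdir_state_init(struct pdir_state *ps)
    {
            sema_init(&ps->append_sem, 1);  /* binary, as in the patch */
    }

    static void pdir_append_block(struct pdir_state *ps)
    {
            down(&ps->append_sem);  /* serialize all appends */
            /* ... grow i_size, map the new block, sync i_disksize ... */
            up(&ps->append_sem);
    }
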
+diff --git a/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..6197d68
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch
+@@ -0,0 +1,381 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:12.449674842 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:15.325667336 +0200
++@@ -1069,11 +1069,14 @@
++ 
++ 	/* tunables */
++ 	unsigned long s_stripe;
++-	unsigned int s_mb_stream_request;
+++	unsigned long s_mb_small_req;
+++	unsigned long s_mb_large_req;
++ 	unsigned int s_mb_max_to_scan;
++ 	unsigned int s_mb_min_to_scan;
++ 	unsigned int s_mb_stats;
++ 	unsigned int s_mb_order2_reqs;
+++	unsigned long *s_mb_prealloc_table;
+++	unsigned long s_mb_prealloc_table_size;
++ 	unsigned int s_mb_group_prealloc;
++ 	unsigned int s_max_writeback_mb_bump;
++ 	/* where last allocation was done - for stream allocation */
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:08:29.325663898 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:15.333660398 +0200
++@@ -1821,6 +1821,25 @@
++ 	ext4_mb_check_limits(ac, e4b, 1);
++ }
++ 
+++static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
+++{
+++	int i;
+++
+++	if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
+++		return;
+++
+++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
+++		if (sbi->s_mb_prealloc_table[i] == 0) {
+++			sbi->s_mb_prealloc_table[i] = value;
+++			return;
+++		}
+++
+++		/* callers must add values in increasing order */
+++		if (value <= sbi->s_mb_prealloc_table[i])
+++			return;
+++	}
+++}
+++
++ /*
++  * This is a special case for storages like raid5
++  * we try to find stripe-aligned chunks for stripe-size requests
++@@ -2216,6 +2235,80 @@
++ 	.show   = ext4_mb_seq_groups_show,
++ };
++ 
+++#define EXT4_MB_PREALLOC_TABLE          "prealloc_table"
+++
+++static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
+++					    int count, int *eof, void *data)
+++{
+++	struct ext4_sb_info *sbi = data;
+++	int len = 0;
+++	int i;
+++
+++	*eof = 1;
+++	if (off != 0)
+++		return 0;
+++
+++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
+++		len += sprintf(page + len, "%ld ",
+++			       sbi->s_mb_prealloc_table[i]);
+++	len += sprintf(page + len, "\n");
+++
+++	*start = page;
+++	return len;
+++}
+++
+++static int ext4_mb_prealloc_table_proc_write(struct file *file,
+++					     const char __user *buf,
+++					     unsigned long cnt, void *data)
+++{
+++	struct ext4_sb_info *sbi = data;
+++	unsigned long value;
+++	unsigned long prev = 0;
+++	char str[128];
+++	char *cur;
+++	char *end;
+++	unsigned long *new_table;
+++	int num = 0;
+++	int i = 0;
+++
+++	if (cnt >= sizeof(str))
+++		return -EINVAL;
+++	if (copy_from_user(str, buf, cnt))
+++		return -EFAULT;
+++
+++	num = 0;
+++	cur = str;
+++	end = str + cnt;
+++	while (cur < end) {
+++		while ((cur < end) && (*cur == ' ')) cur++;
+++		value = simple_strtol(cur, &cur, 0);
+++		if (value == 0)
+++			break;
+++		if (value <= prev)
+++			return -EINVAL;
+++		prev = value;
+++		num++;
+++	}
+++
+++	new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
+++	if (new_table == NULL)
+++		return -ENOMEM;
+++	kfree(sbi->s_mb_prealloc_table);
+++	memset(new_table, 0, num * sizeof(*new_table));
+++	sbi->s_mb_prealloc_table = new_table;
+++	sbi->s_mb_prealloc_table_size = num;
+++	cur = str;
+++	end = str + cnt;
+++	while (cur < end && i < num) {
+++		while ((cur < end) && (*cur == ' ')) cur++;
+++		value = simple_strtol(cur, &cur, 0);
+++		ext4_mb_prealloc_table_add(sbi, value);
+++		i++;
+++	}
+++
+++	return cnt;
+++}
+++
++ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
++ {
++ 	struct super_block *sb = PDE(inode)->data;
++@@ -2455,12 +2548,56 @@
++ 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
++ 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
++ 	sbi->s_mb_stats = MB_DEFAULT_STATS;
++-	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
++ 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
++-	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
+++
+++	if (sbi->s_stripe == 0) {
+++		sbi->s_mb_prealloc_table_size = 10;
+++		i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
+++		sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
+++		if (sbi->s_mb_prealloc_table == NULL) {
+++			kfree(sbi->s_mb_offsets);
+++			kfree(sbi->s_mb_maxs);
+++			return -ENOMEM;
+++		}
+++		memset(sbi->s_mb_prealloc_table, 0, i);
+++
+++		ext4_mb_prealloc_table_add(sbi, 4);
+++		ext4_mb_prealloc_table_add(sbi, 8);
+++		ext4_mb_prealloc_table_add(sbi, 16);
+++		ext4_mb_prealloc_table_add(sbi, 32);
+++		ext4_mb_prealloc_table_add(sbi, 64);
+++		ext4_mb_prealloc_table_add(sbi, 128);
+++		ext4_mb_prealloc_table_add(sbi, 256);
+++		ext4_mb_prealloc_table_add(sbi, 512);
+++		ext4_mb_prealloc_table_add(sbi, 1024);
+++		ext4_mb_prealloc_table_add(sbi, 2048);
+++
+++		sbi->s_mb_small_req = 256;
+++		sbi->s_mb_large_req = 1024;
+++		sbi->s_mb_group_prealloc = 512;
+++	} else {
+++		sbi->s_mb_prealloc_table_size = 3;
+++		i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
+++		sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
+++		if (sbi->s_mb_prealloc_table == NULL) {
+++			kfree(sbi->s_mb_offsets);
+++			kfree(sbi->s_mb_maxs);
+++			return -ENOMEM;
+++		}
+++		memset(sbi->s_mb_prealloc_table, 0, i);
+++
+++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
+++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
+++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
+++
+++		sbi->s_mb_small_req = sbi->s_stripe;
+++		sbi->s_mb_large_req = sbi->s_stripe * 8;
+++		sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
+++	}
++ 
++ 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
++ 	if (sbi->s_locality_groups == NULL) {
+++		kfree(sbi->s_mb_prealloc_table);
++ 		kfree(sbi->s_mb_offsets);
++ 		kfree(sbi->s_mb_maxs);
++ 		return -ENOMEM;
++@@ -2474,9 +2611,18 @@
++ 		spin_lock_init(&lg->lg_prealloc_lock);
++ 	}
++ 
++-	if (sbi->s_proc)
+++	if (sbi->s_proc) {
+++		struct proc_dir_entry *p;
++ 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
++ 				 &ext4_mb_seq_groups_fops, sb);
+++		p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
+++				      S_IRUGO | S_IWUSR, sbi->s_proc);
+++		if (p) {
+++			p->data = sbi;
+++			p->read_proc = ext4_mb_prealloc_table_proc_read;
+++			p->write_proc = ext4_mb_prealloc_table_proc_write;
+++		}
+++	}
++ 
++ 	if (sbi->s_journal)
++ 		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
++@@ -2556,8 +2702,10 @@
++ 	}
++ 
++ 	free_percpu(sbi->s_locality_groups);
++-	if (sbi->s_proc)
+++	if (sbi->s_proc) {
++ 		remove_proc_entry("mb_groups", sbi->s_proc);
+++		remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
+++	}
++ 
++ 	return 0;
++ }
++@@ -2852,11 +3000,12 @@
++ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
++ 				struct ext4_allocation_request *ar)
++ {
++-	int bsbits, max;
+++	int bsbits, i, wind;
++ 	ext4_lblk_t end;
++-	loff_t size, orig_size, start_off;
+++	loff_t size, orig_size;
++ 	ext4_lblk_t start, orig_start;
++ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+++	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
++ 	struct ext4_prealloc_space *pa;
++ 
++ 	/* do normalize only data requests, metadata requests
++@@ -2886,49 +3035,35 @@
++ 	size = size << bsbits;
++ 	if (size < i_size_read(ac->ac_inode))
++ 		size = i_size_read(ac->ac_inode);
+++	size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
++ 
++-	/* max size of free chunks */
++-	max = 2 << bsbits;
+++	start = wind = 0;
++ 
++-#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
++-		(req <= (size) || max <= (chunk_size))
+++	/* let's choose preallocation window depending on file size */
+++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
+++		if (size <= sbi->s_mb_prealloc_table[i]) {
+++			wind = sbi->s_mb_prealloc_table[i];
+++			break;
+++		}
+++	}
+++	size = wind;
++ 
++-	/* first, try to predict filesize */
++-	/* XXX: should this table be tunable? */
++-	start_off = 0;
++-	if (size <= 16 * 1024) {
++-		size = 16 * 1024;
++-	} else if (size <= 32 * 1024) {
++-		size = 32 * 1024;
++-	} else if (size <= 64 * 1024) {
++-		size = 64 * 1024;
++-	} else if (size <= 128 * 1024) {
++-		size = 128 * 1024;
++-	} else if (size <= 256 * 1024) {
++-		size = 256 * 1024;
++-	} else if (size <= 512 * 1024) {
++-		size = 512 * 1024;
++-	} else if (size <= 1024 * 1024) {
++-		size = 1024 * 1024;
++-	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
++-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
++-						(21 - bsbits)) << 21;
++-		size = 2 * 1024 * 1024;
++-	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
++-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
++-							(22 - bsbits)) << 22;
++-		size = 4 * 1024 * 1024;
++-	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
++-					(8<<20)>>bsbits, max, 8 * 1024)) {
++-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
++-							(23 - bsbits)) << 23;
++-		size = 8 * 1024 * 1024;
++-	} else {
++-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
++-		size	  = ac->ac_o_ex.fe_len << bsbits;
+++	if (wind == 0) {
+++		__u64 tstart, tend;
+++		/* file is quite large, we now preallocate with
+++	 * the biggest configured window with regard to
+++		 * logical offset */
+++		wind = sbi->s_mb_prealloc_table[i - 1];
+++		tstart = ac->ac_o_ex.fe_logical;
+++		do_div(tstart, wind);
+++		start = tstart * wind;
+++		tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
+++		do_div(tend, wind);
+++		tend = tend * wind + wind;
+++		size = tend - start;
++ 	}
++-	orig_size = size = size >> bsbits;
++-	orig_start = start = start_off >> bsbits;
+++	orig_size = size;
+++	orig_start = start;
++ 
++ 	/* don't cover already allocated blocks in selected range */
++ 	if (ar->pleft && start <= ar->lleft) {
++@@ -3000,7 +3135,6 @@
++ 	}
++ 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
++ 			start > ac->ac_o_ex.fe_logical);
++-	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
++ 
++ 	/* now prepare goal request */
++ 
++@@ -3986,11 +4120,19 @@
++ 
++ 	/* don't use group allocation for large files */
++ 	size = max(size, isize);
++-	if (size > sbi->s_mb_stream_request) {
+++	if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
+++	    (size >= sbi->s_mb_large_req)) {
++ 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
++ 		return;
++ 	}
++ 
+++	/*
+++	 * the request is so large that we don't care about
+++	 * streaming - it outweighs any possible seek
+++	 */
+++	if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
+++		return;
+++
++ 	BUG_ON(ac->ac_lg != NULL);
++ 	/*
++ 	 * locality group prealloc space are per cpu. The reason for having
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:06.381677398 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:15.337667957 +0200
++@@ -2244,7 +2244,8 @@
++ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
++ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
++ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
++-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
+++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
++ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
++ EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
++ 
++@@ -2259,7 +2260,8 @@
++ 	ATTR_LIST(mb_max_to_scan),
++ 	ATTR_LIST(mb_min_to_scan),
++ 	ATTR_LIST(mb_order2_req),
++-	ATTR_LIST(mb_stream_req),
+++	ATTR_LIST(mb_small_req),
+++	ATTR_LIST(mb_large_req),
++ 	ATTR_LIST(mb_group_prealloc),
++ 	ATTR_LIST(max_writeback_mb_bump),
++ 	NULL,
++Index: linux-source-2.6.32/fs/ext4/inode.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:09:34.579776667 +0200
+++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:10:15.329671358 +0200
++@@ -2884,6 +2884,11 @@
++ 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
++ 		return -EROFS;
++ 
+++	if (wbc->nr_to_write < sbi->s_mb_small_req) {
+++		nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
+++		wbc->nr_to_write = sbi->s_mb_small_req;
+++	}
+++
++ 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++ 		range_whole = 1;
++ 
+diff --git a/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..0c3aee0
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch
+@@ -0,0 +1,15 @@
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:03.433668774 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:09.617666134 +0200
++@@ -395,8 +395,8 @@
++ 	    root->info.hash_version != DX_HASH_HALF_MD4 &&
++ 	    root->info.hash_version != DX_HASH_LEGACY) {
++ 		ext4_warning(dir->i_sb, __func__,
++-			     "Unrecognised inode hash code %d",
++-			     root->info.hash_version);
+++			     "Unrecognised inode hash code %d for directory "
+++                            "#%lu", root->info.hash_version, dir->i_ino);
++ 		brelse(bh);
++ 		*err = ERR_BAD_DX_DIR;
++ 		goto fail;
+diff --git a/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..204f59c
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch
+@@ -0,0 +1,29 @@
++Index: linux-source-2.6.32/fs/ext4/ialloc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:19.417666703 +0200
+++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:23.393677834 +0200
++@@ -1199,7 +1199,6 @@
++ 		if (!gdp)
++ 			continue;
++ 		desc_count += ext4_free_inodes_count(sb, gdp);
++-		cond_resched();
++ 	}
++ 	return desc_count;
++ #endif
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:19.413668742 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:23.393677834 +0200
++@@ -3680,11 +3680,9 @@
++ 		 * block group descriptors.  If the sparse superblocks
++ 		 * feature is turned on, then not all groups have this.
++ 		 */
++-		for (i = 0; i < ngroups; i++) {
+++		for (i = 0; i < ngroups; i++)
++ 			overhead += ext4_bg_has_super(sb, i) +
++ 				ext4_bg_num_gdb(sb, i);
++-			cond_resched();
++-		}
++ 
++ 		/*
++ 		 * Every block group has an inode bitmap, a block
+diff --git a/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..3231e6c
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch
+@@ -0,0 +1,67 @@
++Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:10:23.325664479 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:11:48.561664193 +0200
++@@ -113,6 +113,7 @@
++  * Truncate uses it to simulate recursive walking.
++  */
++ struct ext4_ext_path {
+++	unsigned long			p_generation;
++ 	ext4_fsblk_t			p_block;
++ 	__u16				p_depth;
++ 	struct ext4_extent		*p_ext;
++Index: linux-source-2.6.32/fs/ext4/extents.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:10:54.881664295 +0200
+++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:11:48.565668571 +0200
++@@ -1763,7 +1763,7 @@
++ {
++ 	struct ext4_ext_path *path = NULL;
++ 	struct ext4_ext_cache cbex;
++-	struct ext4_extent *ex;
+++	struct ext4_extent _ex, *ex;
++ 	ext4_lblk_t next, start = 0, end = 0;
++ 	ext4_lblk_t last = block + num;
++ 	int depth, exists, err = 0;
++@@ -1776,17 +1776,29 @@
++ 		/* find extent for this block */
++ 		down_read(&EXT4_I(inode)->i_data_sem);
++ 		path = ext4_ext_find_extent(inode, block, path);
++-		up_read(&EXT4_I(inode)->i_data_sem);
++ 		if (IS_ERR(path)) {
+++			up_read(&EXT4_I(inode)->i_data_sem);
++ 			err = PTR_ERR(path);
++ 			path = NULL;
++ 			break;
++ 		}
++ 
+++		path[0].p_generation = EXT4_I(inode)->i_ext_generation;
+++
++ 		depth = ext_depth(inode);
++-		BUG_ON(path[depth].p_hdr == NULL);
++-		ex = path[depth].p_ext;
+++		if (unlikely(path[depth].p_hdr == NULL)) {
+++			up_read(&EXT4_I(inode)->i_data_sem);
+++			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+++			err = -EIO;
+++			break;
+++		}
+++		ex = NULL;
+++		if (path[depth].p_ext) {
+++			_ex = *path[depth].p_ext;
+++			ex = &_ex;
+++		}
++ 		next = ext4_ext_next_allocated_block(path);
+++		up_read(&EXT4_I(inode)->i_data_sem);
++ 
++ 		exists = 0;
++ 		if (!ex) {
++@@ -1836,7 +1848,7 @@
++ 		}
++ 
++ 		BUG_ON(cbex.ec_len == 0);
++-		err = func(inode, path, &cbex, ex, cbdata);
+++		err = func(inode, path, &cbex, NULL, cbdata);
++ 		ext4_ext_drop_refs(path);
++ 
++ 		if (err < 0)
+diff --git a/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..86c8818
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch
+@@ -0,0 +1,210 @@
++Index: linux-source-2.6.32/fs/ext4/super.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:27.705663349 +0200
+++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:38.097666318 +0200
++@@ -639,7 +639,12 @@
++ 
++ 	for (i = 0; i < sbi->s_gdb_count; i++)
++ 		brelse(sbi->s_group_desc[i]);
++-	kfree(sbi->s_group_desc);
+++
+++	if (is_vmalloc_addr(sbi->s_group_desc))
+++		vfree(sbi->s_group_desc);
+++	else
+++		kfree(sbi->s_group_desc);
+++
++ 	if (is_vmalloc_addr(sbi->s_flex_groups))
++ 		vfree(sbi->s_flex_groups);
++ 	else
++@@ -2466,12 +2471,13 @@
++ 	unsigned long offset = 0;
++ 	unsigned long journal_devnum = 0;
++ 	unsigned long def_mount_opts;
++-	struct inode *root;
+++	struct inode *root = NULL;
++ 	char *cp;
++ 	const char *descr;
++ 	int ret = -EINVAL;
++ 	int blocksize;
++ 	unsigned int db_count;
+++	size_t size;
++ 	unsigned int i;
++ 	int needs_recovery, has_huge_files;
++ 	__u64 blocks_count;
++@@ -2794,11 +2800,18 @@
++ 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
++ 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
++ 		   EXT4_DESC_PER_BLOCK(sb);
++-	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
++-				    GFP_KERNEL);
+++	size = (size_t)db_count * sizeof(struct buffer_head *);
+++	sbi->s_group_desc = kzalloc(size, GFP_KERNEL);
++ 	if (sbi->s_group_desc == NULL) {
++-		ext4_msg(sb, KERN_ERR, "not enough memory");
++-		goto failed_mount;
+++		sbi->s_group_desc = vmalloc(size);
+++		if (sbi->s_group_desc != NULL) {
+++			memset(sbi->s_group_desc, 0, size);
+++		} else {
+++			ext4_msg(sb, KERN_ERR, "no memory for %u groups (%u)\n",
+++				 sbi->s_groups_count, (unsigned int)size);
+++			ret = -ENOMEM;
+++			goto failed_mount;
+++		}
++ 	}
++ 
++ #ifdef __BIG_ENDIAN
++@@ -3003,17 +3016,16 @@
++ 	if (IS_ERR(root)) {
++ 		ext4_msg(sb, KERN_ERR, "get root inode failed");
++ 		ret = PTR_ERR(root);
+++		root = NULL;
++ 		goto failed_mount4;
++ 	}
++ 	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
++-		iput(root);
++ 		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
++ 		goto failed_mount4;
++ 	}
++ 	sb->s_root = d_alloc_root(root);
++ 	if (!sb->s_root) {
++ 		ext4_msg(sb, KERN_ERR, "get root dentry failed");
++-		iput(root);
++ 		ret = -ENOMEM;
++ 		goto failed_mount4;
++ 	}
++@@ -3064,6 +3076,7 @@
++ 	if (err) {
++ 		ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
++ 			 err);
+++		ret = err;
++ 		goto failed_mount4;
++ 	}
++ 
++@@ -3105,6 +3118,8 @@
++ 	goto failed_mount;
++ 
++ failed_mount4:
+++	iput(root);
+++	sb->s_root = NULL;
++ 	ext4_msg(sb, KERN_ERR, "mount failed");
++ 	destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
++ failed_mount_wq:
++@@ -3129,7 +3144,11 @@
++ failed_mount2:
++ 	for (i = 0; i < db_count; i++)
++ 		brelse(sbi->s_group_desc[i]);
++-	kfree(sbi->s_group_desc);
+++
+++	if (is_vmalloc_addr(sbi->s_group_desc))
+++		vfree(sbi->s_group_desc);
+++	else
+++		kfree(sbi->s_group_desc);
++ failed_mount:
++ 	if (sbi->s_proc) {
++ 		remove_proc_entry(sb->s_id, ext4_proc_root);
++Index: linux-source-2.6.32/fs/ext4/mballoc.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:12.521665249 +0200
+++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:38.101746741 +0200
++@@ -2469,24 +2469,37 @@
++ 	while (array_size < sizeof(*sbi->s_group_info) *
++ 	       num_meta_group_infos_max)
++ 		array_size = array_size << 1;
++-	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
++-	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
++-	 * So a two level scheme suffices for now. */
++-	sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
+++	/* A 16TB filesystem with 64-bit pointers requires an 8192 byte
+++	 * kmalloc(). Filesystems larger than 2^32 blocks (16TB normally)
+++	 * have group descriptors at least twice as large (64 bytes or
+++	 * more vs. 32 bytes for traditional ext3 filesystems), so a 128TB
+++	 * filesystem needs a 128kB allocation, which may need vmalloc(). */
+++	sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
++ 	if (sbi->s_group_info == NULL) {
++-		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
++-		return -ENOMEM;
+++		sbi->s_group_info = vmalloc(array_size);
+++		if (sbi->s_group_info != NULL) {
+++			memset(sbi->s_group_info, 0, array_size);
+++		} else {
+++			ext4_msg(sb, KERN_ERR, "no memory for groupinfo (%u)\n",
+++				 array_size);
+++			return -ENOMEM;
+++		}
++ 	}
++ 	sbi->s_buddy_cache = new_inode(sb);
++ 	if (sbi->s_buddy_cache == NULL) {
++-		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
+++		ext4_msg(sb, KERN_ERR, "can't get new inode\n");
++ 		goto err_freesgi;
++ 	}
+++	/* To avoid potentially colliding with a valid on-disk inode number,
+++	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
+++	 * not in the inode hash, so it should never be found by iget(), but
+++	 * this will avoid confusion if it ever shows up during debugging. */
+++	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
++ 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
++ 	for (i = 0; i < ngroups; i++) {
++ 		desc = ext4_get_group_desc(sb, i, NULL);
++ 		if (desc == NULL) {
++-			printk(KERN_ERR
+++			ext4_msg(sb, KERN_ERR,
++ 				"EXT4-fs: can't read descriptor %u\n", i);
++ 			goto err_freebuddy;
++ 		}
++@@ -2504,7 +2517,10 @@
++ 		kfree(sbi->s_group_info[i]);
++ 	iput(sbi->s_buddy_cache);
++ err_freesgi:
++-	kfree(sbi->s_group_info);
+++	if (is_vmalloc_addr(sbi->s_group_info))
+++		vfree(sbi->s_group_info);
+++	else
+++		kfree(sbi->s_group_info);
++ 	return -ENOMEM;
++ }
++ 
++@@ -2545,14 +2561,6 @@
++ 		i++;
++ 	} while (i <= sb->s_blocksize_bits + 1);
++ 
++-	/* init file for buddy data */
++-	ret = ext4_mb_init_backend(sb);
++-	if (ret != 0) {
++-		kfree(sbi->s_mb_offsets);
++-		kfree(sbi->s_mb_maxs);
++-		return ret;
++-	}
++-
++ 	spin_lock_init(&sbi->s_md_lock);
++ 	spin_lock_init(&sbi->s_bal_lock);
++ 
++@@ -2622,6 +2630,15 @@
++ 		spin_lock_init(&lg->lg_prealloc_lock);
++ 	}
++ 
+++	/* init file for buddy data */
+++	ret = ext4_mb_init_backend(sb);
+++	if (ret != 0) {
+++		kfree(sbi->s_mb_prealloc_table);
+++		kfree(sbi->s_mb_offsets);
+++		kfree(sbi->s_mb_maxs);
+++		return ret;
+++	}
+++
++ 	if (sbi->s_proc) {
++ 		struct proc_dir_entry *p;
++ 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
++@@ -2682,7 +2699,10 @@
++ 			EXT4_DESC_PER_BLOCK_BITS(sb);
++ 		for (i = 0; i < num_meta_group_infos; i++)
++ 			kfree(sbi->s_group_info[i]);
++-		kfree(sbi->s_group_info);
+++		if (is_vmalloc_addr(sbi->s_group_info))
+++			vfree(sbi->s_group_info);
+++		else
+++			kfree(sbi->s_group_info);
++ 	}
++ 	kfree(sbi->s_mb_offsets);
++ 	kfree(sbi->s_mb_maxs);
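
The vmalloc patch applies one pattern in several places: try kzalloc() first,
fall back to vmalloc() when the group-descriptor or group-info table is too
large for the slab allocator, and free with whichever routine
is_vmalloc_addr() selects. Condensed into a helper pair it looks roughly like
this (a sketch only; later mainline kernels added kvmalloc()/kvfree() for
exactly this pattern):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* illustrative helpers, not part of the patch */
    static void *big_zalloc(size_t size)
    {
            void *p = kzalloc(size, GFP_KERNEL);

            if (p == NULL) {
                    p = vmalloc(size);      /* too big for the slab */
                    if (p != NULL)
                            memset(p, 0, size);
            }
            return p;
    }

    static void big_free(void *p)
    {
            if (is_vmalloc_addr(p))         /* match the allocator */
                    vfree(p);
            else
                    kfree(p);
    }
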
+diff --git a/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..d9bf1d9
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch
+@@ -0,0 +1,80 @@
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:08:39.705675504 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:08:58.217663806 +0200
++@@ -144,6 +144,17 @@
++ 	u16 size;
++ };
++ 
+++/*
+++ * dentry parameters used to pass a wanted inode number to ext4_new_inode()
+++ */
+++#define LVFS_DENTRY_PARAM_MAGIC		20070216UL
+++struct lvfs_dentry_params
+++{
+++	unsigned long   ldp_inum;
+++	unsigned long	ldp_flags;
+++	u32		ldp_magic;
+++};
+++
++ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
++ static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
++ static inline unsigned dx_get_hash(struct dx_entry *entry);
++@@ -1753,6 +1764,19 @@
++ 	return err;
++ }
++ 
+++static unsigned ext4_dentry_goal(struct super_block *sb, struct dentry *dentry)
+++{
+++	unsigned inum = EXT4_SB(sb)->s_inode_goal;
+++
+++	if (dentry->d_fsdata != NULL) {
+++		struct lvfs_dentry_params *param = dentry->d_fsdata;
+++
+++		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC)
+++			inum = param->ldp_inum;
+++	}
+++	return inum;
+++}
+++
++ /*
++  * By the time this is called, we already have created
++  * the directory cache entry for the new file, but it
++@@ -1778,7 +1802,8 @@
++ 	if (IS_DIRSYNC(dir))
++ 		ext4_handle_sync(handle);
++ 
++-	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
+++	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
+++				ext4_dentry_goal(dir->i_sb, dentry));
++ 	err = PTR_ERR(inode);
++ 	if (!IS_ERR(inode)) {
++ 		inode->i_op = &ext4_file_inode_operations;
++@@ -1812,7 +1837,8 @@
++ 	if (IS_DIRSYNC(dir))
++ 		ext4_handle_sync(handle);
++ 
++-	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
+++	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
+++				ext4_dentry_goal(dir->i_sb, dentry));
++ 	err = PTR_ERR(inode);
++ 	if (!IS_ERR(inode)) {
++ 		init_special_inode(inode, inode->i_mode, rdev);
++@@ -1850,7 +1876,7 @@
++ 		ext4_handle_sync(handle);
++ 
++ 	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
++-			       &dentry->d_name, 0);
+++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
++ 	err = PTR_ERR(inode);
++ 	if (IS_ERR(inode))
++ 		goto out_stop;
++@@ -2271,7 +2297,7 @@
++ 		ext4_handle_sync(handle);
++ 
++ 	inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
++-			       &dentry->d_name, 0);
+++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
++ 	err = PTR_ERR(inode);
++ 	if (IS_ERR(inode))
++ 		goto out_stop;
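
The wantedi mechanism is driven entirely by the caller: a server stack such
as Lustre fills in an lvfs_dentry_params, hangs it off dentry->d_fsdata, and
then calls the normal VFS entry points, whereupon ext4_dentry_goal() picks up
the requested inode number. A sketch of such a caller (the helper name and
error handling are illustrative, and the struct is duplicated here because
the patch defines it privately in namei.c):

    #include <linux/fs.h>
    #include <linux/dcache.h>

    #define LVFS_DENTRY_PARAM_MAGIC 20070216UL

    struct lvfs_dentry_params {
            unsigned long   ldp_inum;
            unsigned long   ldp_flags;
            u32             ldp_magic;
    };

    /* Create @dentry in @dir, asking ext4 to allocate inode number
     * @wanted_ino for it. */
    static int create_with_wanted_ino(struct inode *dir,
                                      struct dentry *dentry, int mode,
                                      unsigned long wanted_ino)
    {
            struct lvfs_dentry_params param = {
                    .ldp_inum  = wanted_ino,
                    .ldp_magic = LVFS_DENTRY_PARAM_MAGIC,
            };
            int err;

            dentry->d_fsdata = &param;
            err = vfs_create(dir, dentry, mode, NULL);
            dentry->d_fsdata = NULL;
            return err;
    }
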
+diff --git a/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..b0bee6d
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch
+@@ -0,0 +1,32 @@
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:06.385666114 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:12.449674842 +0200
++@@ -1503,6 +1503,13 @@
++ #define EXT4_MAX_DIR_SIZE_NAME		"max_dir_size"
++ 
++ /*
+++ * Indicates that ctime should not be updated in ext4_xattr_set_handle()
+++ */
+++#ifndef XATTR_NO_CTIME
+++#define XATTR_NO_CTIME 0x80
+++#endif
+++
+++/*
++  * Function prototypes
++  */
++ 
++Index: linux-source-2.6.32/fs/ext4/xattr.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:08:30.285665815 +0200
+++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:10:12.481658638 +0200
++@@ -1045,7 +1045,8 @@
++ 	}
++ 	if (!error) {
++ 		ext4_xattr_update_super_block(handle, inode->i_sb);
++-		inode->i_ctime = ext4_current_time(inode);
+++		if (!(flags & XATTR_NO_CTIME))
+++			inode->i_ctime = ext4_current_time(inode);
++ 		if (!value)
++ 			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
++ 		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
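
With the flag in place, a caller updates an extended attribute without
touching ctime by ORing XATTR_NO_CTIME into the flags argument of
ext4_xattr_set_handle(). A minimal sketch, assuming access to ext4's private
headers (the wrapper name and the choice of attribute index are
illustrative):

    /* set a trusted xattr inside an existing journal handle without
     * updating i_ctime */
    static int set_xattr_no_ctime(handle_t *handle, struct inode *inode,
                                  const char *name, const void *buf,
                                  size_t len)
    {
            return ext4_xattr_set_handle(handle, inode,
                                         EXT4_XATTR_INDEX_TRUSTED, name,
                                         buf, len, XATTR_NO_CTIME);
    }
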
+diff --git a/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..d99ae18
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch
+@@ -0,0 +1,521 @@
++This patch implements a feature which allows ext4 users (e.g. Lustre) to
++store data in the ext4 dirent.
++Data is stored in the ext4 dirent after the file name; this space is
++accounted for in de->rec_len. The flag EXT4_DIRENT_LUFID is added to
++d_type if extra data is present.
++
++dentry->d_fsdata is used to pass the fid to ext4, so no changes to the
++ext4_add_entry() interface are required.
++
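For illustration, the payload layout the patch expects can be mirrored in
userspace (a sketch; the fixed 16-byte buffer stands in for a packed Lustre
FID, which the real code passes through a flexible array member):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EXT4_LUFID_MAGIC 0xAD200907UL

    /* mirror of the struct added to ext4.h, with a fixed-size payload
     * for simplicity */
    struct ext4_dentry_param {
            uint32_t edp_magic;     /* EXT4_LUFID_MAGIC */
            char     edp_len;       /* size of edp_data in bytes */
            char     edp_data[16];  /* packed payload */
    } __attribute__((packed));

    int main(void)
    {
            unsigned char fid[16] = { 0 };  /* stand-in for a packed FID */
            struct ext4_dentry_param p;

            p.edp_magic = EXT4_LUFID_MAGIC;
            p.edp_len = sizeof(fid);
            memcpy(p.edp_data, fid, sizeof(fid));

            /* ext4_dentry_get_data() returns &p.edp_len: a length byte
             * followed by the payload, which ext4 stores after the
             * name's NUL byte and flags with EXT4_DIRENT_LUFID */
            printf("payload: %d byte(s) after the length byte\n",
                   p.edp_len);
            return 0;
    }
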
++Index: linux-source-2.6.32/fs/ext4/dir.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/dir.c	2012-06-28 12:08:15.765666233 +0200
+++++ linux-source-2.6.32/fs/ext4/dir.c	2012-06-28 12:11:16.361665139 +0200
++@@ -53,11 +53,18 @@
++ 
++ static unsigned char get_dtype(struct super_block *sb, int filetype)
++ {
+++	int fl_index = filetype & EXT4_FT_MASK;
+++
++ 	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
++-	    (filetype >= EXT4_FT_MAX))
+++	    (fl_index >= EXT4_FT_MAX))
++ 		return DT_UNKNOWN;
++ 
++-	return (ext4_filetype_table[filetype]);
+++	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
+++		return (ext4_filetype_table[fl_index]);
+++
+++	return (ext4_filetype_table[fl_index]) |
+++		(filetype & EXT4_DIRENT_LUFID);
+++
++ }
++ 
++ 
++@@ -70,11 +77,11 @@
++ 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
++ 						dir->i_sb->s_blocksize);
++ 
++-	if (rlen < EXT4_DIR_REC_LEN(1))
+++	if (rlen < __EXT4_DIR_REC_LEN(1))
++ 		error_msg = "rec_len is smaller than minimal";
++ 	else if (rlen % 4 != 0)
++ 		error_msg = "rec_len % 4 != 0";
++-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+++	else if (rlen < EXT4_DIR_REC_LEN(de))
++ 		error_msg = "rec_len is too small for name_len";
++ 	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
++ 		error_msg = "directory entry across blocks";
++@@ -181,7 +188,7 @@
++ 				 * failure will be detected in the
++ 				 * dirent test below. */
++ 				if (ext4_rec_len_from_disk(de->rec_len,
++-					sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
+++					sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
++ 					break;
++ 				i += ext4_rec_len_from_disk(de->rec_len,
++ 							    sb->s_blocksize);
++@@ -344,12 +351,17 @@
++ 	struct fname *fname, *new_fn;
++ 	struct dir_private_info *info;
++ 	int len;
+++	int extra_data = 1;
++ 
++ 	info = (struct dir_private_info *) dir_file->private_data;
++ 	p = &info->root.rb_node;
++ 
++ 	/* Create and allocate the fname structure */
++-	len = sizeof(struct fname) + dirent->name_len + 1;
+++	if (dirent->file_type & EXT4_DIRENT_LUFID)
+++		extra_data = ext4_get_dirent_data_len(dirent);
+++
+++	len = sizeof(struct fname) + dirent->name_len + extra_data;
+++
++ 	new_fn = kzalloc(len, GFP_KERNEL);
++ 	if (!new_fn)
++ 		return -ENOMEM;
++@@ -358,7 +370,7 @@
++ 	new_fn->inode = le32_to_cpu(dirent->inode);
++ 	new_fn->name_len = dirent->name_len;
++ 	new_fn->file_type = dirent->file_type;
++-	memcpy(new_fn->name, dirent->name, dirent->name_len);
+++	memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data);
++ 	new_fn->name[dirent->name_len] = 0;
++ 
++ 	while (*p) {
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:58.337671542 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:16.361665139 +0200
++@@ -1258,6 +1258,7 @@
++ #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
++ #define EXT4_FEATURE_INCOMPAT_MMP               0x0100
++ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
+++#define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
++ 
++ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
++ #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
++@@ -1266,7 +1267,9 @@
++ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
++ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
++ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
++-					 EXT4_FEATURE_INCOMPAT_MMP)
+++					 EXT4_FEATURE_INCOMPAT_MMP| \
+++					 EXT4_FEATURE_INCOMPAT_DIRDATA)
+++
++ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
++ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
++ 					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
++@@ -1348,6 +1351,43 @@
++ #define EXT4_FT_SYMLINK		7
++ 
++ #define EXT4_FT_MAX		8
+++#define EXT4_FT_MASK		0xf
+++
+++#if EXT4_FT_MAX > EXT4_FT_MASK
+++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
+++#endif
+++
+++/*
+++ * d_type has 4 unused bits, so it can hold four types of data. These
+++ * different types (e.g. Lustre data, the high 32 bits of a 64-bit inode
+++ * number) can be stored, in flag order, after the file name in the dirent.
+++ */
+++/*
+++ * This flag is added to d_type if the ext4 dirent has extra data after the
+++ * filename. The data length is variable and is stored in the first byte of
+++ * the data; the data starts after the filename's NUL byte.
+++ * This is used by the Lustre FS.
+++ */
+++#define EXT4_DIRENT_LUFID		0x10
+++
+++#define EXT4_LUFID_MAGIC    0xAD200907UL
+++struct ext4_dentry_param {
+++	__u32  edp_magic;	/* EXT4_LUFID_MAGIC */
+++	char   edp_len;		/* size of edp_data in bytes */
+++	char   edp_data[0];	/* packed array of data */
+++} __attribute__((packed));
+++
+++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
+++		struct ext4_dentry_param* p)
+++
+++{
+++	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
+++		return NULL;
+++	if (p && p->edp_magic == EXT4_LUFID_MAGIC)
+++		return &p->edp_len;
+++	else
+++		return NULL;
+++}
++ 
++ /*
++  * EXT4_DIR_PAD defines the directory entries boundaries
++@@ -1356,8 +1396,11 @@
++  */
++ #define EXT4_DIR_PAD			4
++ #define EXT4_DIR_ROUND			(EXT4_DIR_PAD - 1)
++-#define EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT4_DIR_ROUND) & \
+++#define __EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT4_DIR_ROUND) & \
++ 					 ~EXT4_DIR_ROUND)
+++#define EXT4_DIR_REC_LEN(de)		(__EXT4_DIR_REC_LEN(de->name_len +\
+++					ext4_get_dirent_data_len(de)))
+++
++ #define EXT4_MAX_REC_LEN		((1<<16)-1)
++ 
++ /*
++@@ -1684,7 +1727,7 @@
++ 					    struct ext4_dir_entry_2 ** res_dir);
++ #define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
++ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++-			       struct inode *inode);
+++			       struct inode *inode, const void *, const void *);
++ extern struct buffer_head *ext4_append(handle_t *handle,
++ 				       struct inode *inode,
++ 				       ext4_lblk_t *block, int *err);
++@@ -2015,6 +2058,28 @@
++ 
++ #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
++ 
+++/*
+++ * Compute the total directory entry data length.
+++ * This includes the filename and an implicit NUL terminator (always present),
+++ * and optional extensions.  Each extension has a bit set in the high 4 bits of
+++ * de->file_type, and the extension length is the first byte in each entry.
+++ */
+++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
+++{
+++	char *len = de->name + de->name_len + 1 /* NUL terminator */;
+++	int dlen = 0;
+++	__u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
+++
+++	while (extra_data_flags) {
+++		if (extra_data_flags & 1) {
+++			dlen += *len + (dlen == 0);
+++			len += *len;
+++		}
+++		extra_data_flags >>= 1;
+++	}
+++	return dlen;
+++}
+++
++ #endif	/* __KERNEL__ */
++ 
++ #endif	/* _EXT4_H */
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:52.005665948 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:16.365664904 +0200
++@@ -169,7 +169,8 @@
++ static unsigned dx_get_limit(struct dx_entry *entries);
++ static void dx_set_count(struct dx_entry *entries, unsigned value);
++ static void dx_set_limit(struct dx_entry *entries, unsigned value);
++-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
+++static inline unsigned dx_root_limit(__u32 blocksize,
+++		struct ext4_dir_entry_2 *dot_de, unsigned infosize);
++ static unsigned dx_node_limit(struct inode *dir);
++ static struct dx_frame *dx_probe(const struct qstr *d_name,
++ 				 struct inode *dir,
++@@ -236,11 +237,12 @@
++  */
++ struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
++ {
++-       /* get dotdot first */
++-       de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
+++	BUG_ON(de->name_len != 1);
+++	/* get dotdot first */
+++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
++ 
++-       /* dx root info is after dotdot entry */
++-       de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
+++	/* dx root info is after dotdot entry */
+++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
++ 
++        return (struct dx_root_info *) de;
++ }
++@@ -285,16 +287,23 @@
++ 	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
++ }
++ 
++-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
+++static inline unsigned dx_root_limit(__u32 blocksize,
+++		struct ext4_dir_entry_2 *dot_de, unsigned infosize)
++ {
++-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
++-		EXT4_DIR_REC_LEN(2) - infosize;
+++	struct ext4_dir_entry_2 *dotdot_de;
+++	unsigned entry_space;
+++
+++	BUG_ON(dot_de->name_len != 1);
+++	dotdot_de = ext4_next_entry(dot_de, blocksize);
+++	entry_space = blocksize - EXT4_DIR_REC_LEN(dot_de) -
+++			 EXT4_DIR_REC_LEN(dotdot_de) - infosize;
+++
++ 	return entry_space / sizeof(struct dx_entry);
++ }
++ 
++ static inline unsigned dx_node_limit(struct inode *dir)
++ {
++-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+++	unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
++ 	return entry_space / sizeof(struct dx_entry);
++ }
++ 
++@@ -341,7 +350,7 @@
++ 				printk(":%x.%u ", h.hash,
++ 				       ((char *) de - base));
++ 			}
++-			space += EXT4_DIR_REC_LEN(de->name_len);
+++			space += EXT4_DIR_REC_LEN(de);
++ 			names++;
++ 		}
++ 		de = ext4_next_entry(de, size);
++@@ -445,7 +454,8 @@
++ 
++ 	entries = (struct dx_entry *) (((char *)info) + info->info_length);
++ 
++-	if (dx_get_limit(entries) != dx_root_limit(dir,
+++	if (dx_get_limit(entries) != dx_root_limit(dir->i_sb->s_blocksize,
+++						   (struct ext4_dir_entry_2*)bh->b_data,
++ 						   info->info_length)) {
++ 		ext4_warning(dir->i_sb, __func__,
++ 			     "dx entry: limit != root limit");
++@@ -635,7 +645,7 @@
++ 	de = (struct ext4_dir_entry_2 *) bh->b_data;
++ 	top = (struct ext4_dir_entry_2 *) ((char *) de +
++ 					   dir->i_sb->s_blocksize -
++-					   EXT4_DIR_REC_LEN(0));
+++					   __EXT4_DIR_REC_LEN(0));
++ 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
++ 		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
++ 					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
++@@ -1048,7 +1058,7 @@
++ 			goto errout;
++ 		de = (struct ext4_dir_entry_2 *) bh->b_data;
++ 		top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
++-				       EXT4_DIR_REC_LEN(0));
+++				       __EXT4_DIR_REC_LEN(0));
++ 		for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
++ 			int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
++ 				  + ((char *) de - bh->b_data);
++@@ -1210,7 +1220,7 @@
++ 	while (count--) {
++ 		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) 
++ 						(from + (map->offs<<2));
++-		rec_len = EXT4_DIR_REC_LEN(de->name_len);
+++		rec_len = EXT4_DIR_REC_LEN(de);
++ 		memcpy (to, de, rec_len);
++ 		((struct ext4_dir_entry_2 *) to)->rec_len =
++ 				ext4_rec_len_to_disk(rec_len, blocksize);
++@@ -1234,7 +1244,7 @@
++ 	while ((char*)de < base + blocksize) {
++ 		next = ext4_next_entry(de, blocksize);
++ 		if (de->inode && de->name_len) {
++-			rec_len = EXT4_DIR_REC_LEN(de->name_len);
+++			rec_len = EXT4_DIR_REC_LEN(de);
++ 			if (de > to)
++ 				memmove(to, de, rec_len);
++ 			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
++@@ -1364,10 +1374,16 @@
++ 	unsigned int	offset = 0;
++ 	unsigned int	blocksize = dir->i_sb->s_blocksize;
++ 	unsigned short	reclen;
++-	int		nlen, rlen, err;
+++	int		nlen, rlen, err, dlen = 0;
+++	unsigned char	*data;
++ 	char		*top;
++ 
++-	reclen = EXT4_DIR_REC_LEN(namelen);
+++	data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
+++						dentry->d_fsdata);
+++	if (data)
+++		dlen = (*data) + 1;
+++
+++	reclen = __EXT4_DIR_REC_LEN(namelen + dlen);
++ 	if (!de) {
++ 		de = (struct ext4_dir_entry_2 *)bh->b_data;
++ 		top = bh->b_data + blocksize - reclen;
++@@ -1377,7 +1393,7 @@
++ 				return -EIO;
++ 			if (ext4_match(namelen, name, de))
++ 				return -EEXIST;
++-			nlen = EXT4_DIR_REC_LEN(de->name_len);
+++			nlen = EXT4_DIR_REC_LEN(de);
++ 			rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
++ 			if ((de->inode? rlen - nlen: rlen) >= reclen)
++ 				break;
++@@ -1395,7 +1411,7 @@
++ 	}
++ 
++ 	/* By now the buffer is marked for journaling */
++-	nlen = EXT4_DIR_REC_LEN(de->name_len);
+++	nlen = EXT4_DIR_REC_LEN(de);
++ 	rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
++ 	if (de->inode) {
++ 		struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
++@@ -1411,6 +1427,12 @@
++ 		de->inode = 0;
++ 	de->name_len = namelen;
++ 	memcpy(de->name, name, namelen);
+++	if (data) {
+++		de->name[namelen] = 0;
+++		memcpy(&de->name[namelen + 1], data, *(char *) data);
+++		de->file_type |= EXT4_DIRENT_LUFID;
+++	}
+++
++ 	/*
++ 	 * XXX shouldn't update any times until successful
++ 	 * completion of syscall, but too many callers depend
++@@ -1508,7 +1530,8 @@
++ 
++ 	dx_set_block(entries, 1);
++ 	dx_set_count(entries, 1);
++-	dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
+++	dx_set_limit(entries, dx_root_limit(dir->i_sb->s_blocksize,
+++					 dot_de, sizeof(root->info)));
++ 
++ 	/* Initialize as for dx_probe */
++ 	hinfo.hash_version = dx_info->hash_version;
++@@ -1539,6 +1562,8 @@
++ 	struct buffer_head * dir_block;
++ 	struct ext4_dir_entry_2 * de;
++ 	int len, journal = 0, err = 0;
+++	int dlen = 0;
+++	char *data;
++ 
++ 	if (IS_ERR(handle))
++ 		return PTR_ERR(handle);
++@@ -1554,19 +1579,24 @@
++ 	/* the first item must be "." */
++ 	assert(de->name_len == 1 && de->name[0] == '.');
++ 	len = le16_to_cpu(de->rec_len);
++-	assert(len >= EXT4_DIR_REC_LEN(1));
++-	if (len > EXT4_DIR_REC_LEN(1)) {
+++	assert(len >= __EXT4_DIR_REC_LEN(1));
+++	if (len > __EXT4_DIR_REC_LEN(1)) {
++ 		BUFFER_TRACE(dir_block, "get_write_access");
++ 		err = ext4_journal_get_write_access(handle, dir_block);
++ 		if (err)
++ 			goto out_journal;
++ 
++ 		journal = 1;
++-		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
+++		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
++ 	}
++ 
++-	len -= EXT4_DIR_REC_LEN(1);
++-	assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
+++	len -= EXT4_DIR_REC_LEN(de);
+++	data = ext4_dentry_get_data(dir->i_sb,
+++			(struct ext4_dentry_param *) dentry->d_fsdata);
+++	if (data)
+++		dlen = *data + 1;
+++	assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
+++
++ 	de = (struct ext4_dir_entry_2 *)
++ 			((char *) de + le16_to_cpu(de->rec_len));
++ 	if (!journal) {
++@@ -1580,10 +1610,15 @@
++ 	if (len > 0)
++ 		de->rec_len = cpu_to_le16(len);
++ 	else
++-		assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
+++		assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
++ 	de->name_len = 2;
++ 	strcpy (de->name, "..");
++ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+++	if (data) {
+++		de->name[2] = 0;
+++		memcpy(&de->name[2 + 1], data, dlen);
+++		de->file_type |= EXT4_DIRENT_LUFID;
+++	}
++ 
++ out_journal:
++ 	if (journal) {
++@@ -2008,12 +2043,13 @@
++ /* Initialize @inode as a subdirectory of @dir, and add the
++  * "." and ".." entries into the first directory block. */
++ int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
++-			struct inode *inode)
+++			struct inode *inode,
+++                        const void *data1, const void *data2)
++ {
++ 	struct buffer_head * dir_block;
++ 	struct ext4_dir_entry_2 * de;
++ 	unsigned int blocksize = dir->i_sb->s_blocksize;
++-	int err = 0;
+++	int err = 0, dot_reclen;
++ 
++ 	if (IS_ERR(handle))
++ 		return PTR_ERR(handle);
++@@ -2026,28 +2062,40 @@
++ 	inode->i_fop = &ext4_dir_operations;
++ 	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
++ 	dir_block = ext4_bread(handle, inode, 0, 1, &err);
++-	if (!dir_block) {
++-		clear_nlink(inode);
++-		ext4_mark_inode_dirty(handle, inode);
++-		iput (inode);
+++	if (!dir_block)
++ 		goto get_out;
++-	}
+++
++ 	BUFFER_TRACE(dir_block, "get_write_access");
++ 	ext4_journal_get_write_access(handle, dir_block);
++ 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
++ 	de->inode = cpu_to_le32(inode->i_ino);
++ 	de->name_len = 1;
++-	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
++-					   blocksize);
++ 	strcpy(de->name, ".");
++ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+++	/* get packed fid data*/
+++	data1 = ext4_dentry_get_data(dir->i_sb,
+++				(struct ext4_dentry_param *) data1);
+++	if (data1) {
+++		de->name[1] = 0;
+++		memcpy(&de->name[2], data1, *(char *) data1);
+++		de->file_type |= EXT4_DIRENT_LUFID;
+++	}
+++	de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
+++	dot_reclen = cpu_to_le16(de->rec_len);
++ 	de = ext4_next_entry(de, blocksize);
++ 	de->inode = cpu_to_le32(dir->i_ino);
++-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
+++	de->rec_len = ext4_rec_len_to_disk(blocksize - dot_reclen,
++ 					   blocksize);
++ 	de->name_len = 2;
++ 	strcpy(de->name, "..");
++ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+++	data2 = ext4_dentry_get_data(dir->i_sb,
+++			(struct ext4_dentry_param *) data2);
+++	if (data2) {
+++		de->name[2] = 0;
+++		memcpy(&de->name[3], data2, *(char *) data2);
+++		de->file_type |= EXT4_DIRENT_LUFID;
+++	}
++ 	inode->i_nlink = 2;
++ 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
++ 	ext4_handle_dirty_metadata(handle, dir, dir_block);
++@@ -2084,9 +2132,12 @@
++ 	if (IS_ERR(inode))
++ 		goto out_stop;
++ 
++-	err = ext4_add_dot_dotdot(handle, dir, inode);
+++	err = ext4_add_dot_dotdot(handle, dir, inode, NULL, NULL);
++ 	if (err) {
+++		clear_nlink(inode);
++ 		unlock_new_inode(inode);
+++		ext4_mark_inode_dirty(handle, inode);
+++		iput (inode);
++ 		goto out_stop;
++ 	}
++ 
++@@ -2122,7 +2173,7 @@
++ 	int err = 0;
++ 
++ 	sb = inode->i_sb;
++-	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
+++	if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2) ||
++ 	    !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
++ 		if (err)
++ 			ext4_error(inode->i_sb, __func__,
+diff --git a/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch
+new file mode 100644
+index 0000000..7166474
+--- /dev/null
++++ b/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch
+@@ -0,0 +1,2272 @@
++Index: linux-source-2.6.32/include/linux/htree_lock.h
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/include/linux/htree_lock.h	2012-06-28 12:11:51.973666178 +0200
++@@ -0,0 +1,187 @@
+++/*
+++ * include/linux/htree_lock.h
+++ *
+++ * Copyright (c) 2011, Whamcloud, Inc.
+++ *
+++ * Author: Liang Zhen <liang at whamcloud.com>
+++ */
+++
+++/*
+++ * htree lock
+++ *
+++ * htree_lock is an advanced lock; it supports five lock modes (a concept
+++ * taken from the DLM) and it is a sleeping lock.
+++ *
+++ * The most common use case is:
+++ * - create a htree_lock_head for data
+++ * - each thread (contender) creates its own htree_lock
+++ * - a contender calls htree_lock(lock_node, mode) to protect the data and
+++ *   htree_unlock to release the lock
+++ *
+++ * There is also a more complex, advanced use case: a user can take a PW/PR
+++ * lock on a particular key; this is mostly used while holding a shared
+++ * lock on the htree (CW, CR)
+++ *
+++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
+++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
+++ * ...
+++ * htree_node_unlock(lock_node); unlock the key
+++ *
+++ * Another tip: we can have N levels of these keys; all we need to do is
+++ * specify N levels when creating the htree_lock_head, and then we can
+++ * lock/unlock a specific level with:
+++ * htree_node_lock(lock_node, mode1, key1, level1...);
+++ * do something;
+++ * htree_node_lock(lock_node, mode1, key2, level2...);
+++ * do something;
+++ * htree_node_unlock(lock_node, level2);
+++ * htree_node_unlock(lock_node, level1);
+++ *
+++ * NB: with multiple levels, be careful about locking order to avoid deadlock
+++ */
+++
+++#ifndef _LINUX_HTREE_LOCK_H
+++#define _LINUX_HTREE_LOCK_H
+++
+++#include <linux/list.h>
+++#include <linux/spinlock.h>
+++#include <linux/sched.h>
+++
+++/*
+++ * Lock Modes
+++ * more details can be found here:
+++ * http://en.wikipedia.org/wiki/Distributed_lock_manager
+++ */
+++typedef enum {
+++	HTREE_LOCK_EX	= 0, /* exclusive lock: incompatible with all others */
+++	HTREE_LOCK_PW,	     /* protected write: allows only CR users */
+++	HTREE_LOCK_PR,	     /* protected read: allow PR, CR users */
+++	HTREE_LOCK_CW,	     /* concurrent write: allow CR, CW users */
+++	HTREE_LOCK_CR,	     /* concurrent read: allow all but EX users */
+++	HTREE_LOCK_MAX,	     /* number of lock modes */
+++} htree_lock_mode_t;
+++
+++#define HTREE_LOCK_NL		HTREE_LOCK_MAX
+++#define HTREE_LOCK_INVAL	0xdead10c
+++
+++enum {
+++	HTREE_HBITS_MIN		= 2,
+++	HTREE_HBITS_DEF		= 14,
+++	HTREE_HBITS_MAX		= 32,
+++};
+++
+++enum {
+++	HTREE_EVENT_DISABLE	= (0),
+++	HTREE_EVENT_RD		= (1 << HTREE_LOCK_PR),
+++	HTREE_EVENT_WR		= (1 << HTREE_LOCK_PW),
+++	HTREE_EVENT_RDWR	= (HTREE_EVENT_RD | HTREE_EVENT_WR),
+++};
+++
+++struct htree_lock;
+++
+++typedef void (*htree_event_cb_t)(void *target, void *event);
+++
+++struct htree_lock_child {
+++	struct list_head	lc_list;	/* granted list */
+++	htree_event_cb_t	lc_callback;	/* event callback */
+++	unsigned		lc_events;	/* event types */
+++};
+++
+++struct htree_lock_head {
+++	unsigned long		lh_lock;	/* bits lock */
+++	/* blocked lock list (htree_lock) */
+++	struct list_head	lh_blocked_list;
+++	/* # key levels */
+++	u16			lh_depth;
+++	/* hash bits for key and limit number of locks */
+++	u16			lh_hbits;
+++	/* counters for blocked locks */
+++	u16			lh_nblocked[HTREE_LOCK_MAX];
+++	/* counters for granted locks */
+++	u16			lh_ngranted[HTREE_LOCK_MAX];
+++	/* private data */
+++	void			*lh_private;
+++	/* array of children locks */
+++	struct htree_lock_child	lh_children[0];
+++};
+++
+++/* htree_lock_node_t is child-lock for a specific key (ln_value) */
+++struct htree_lock_node {
+++	htree_lock_mode_t	ln_mode;
+++	/* major hash key */
+++	u16			ln_major_key;
+++	/* minor hash key */
+++	u16			ln_minor_key;
+++	struct list_head	ln_major_list;
+++	struct list_head	ln_minor_list;
+++	/* alive list, all locks (granted, blocked, listening) are on it */
+++	struct list_head	ln_alive_list;
+++	/* blocked list */
+++	struct list_head	ln_blocked_list;
+++	/* granted list */
+++	struct list_head	ln_granted_list;
+++	void			*ln_ev_target;
+++};
+++
+++struct htree_lock {
+++	struct task_struct	*lk_task;
+++	struct htree_lock_head	*lk_head;
+++	void			*lk_private;
+++	unsigned		lk_depth;
+++	htree_lock_mode_t	lk_mode;
+++	struct list_head	lk_blocked_list;
+++	struct htree_lock_node	lk_nodes[0];
+++};
+++
+++/* create a lock head, which stands for a resource */
+++struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
+++					      unsigned hbits, unsigned priv);
+++/* free a lock head */
+++void htree_lock_head_free(struct htree_lock_head *lhead);
+++/* register event callback for child lock at level @depth */
+++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
+++			     unsigned events, htree_event_cb_t callback);
+++/* create a lock handle, which stands for a thread */
+++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
+++/* free a lock handle */
+++void htree_lock_free(struct htree_lock *lck);
+++/* lock the htree; when @wait is false, 0 is returned if the lock can't
+++ * be granted immediately */
+++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
+++		   htree_lock_mode_t mode, int wait);
+++/* unlock htree */
+++void htree_unlock(struct htree_lock *lck);
+++/* unlock and relock htree with @new_mode */
+++int htree_change_lock_try(struct htree_lock *lck,
+++			  htree_lock_mode_t new_mode, int wait);
+++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
+++/* acquire the child lock (key) of the htree at level @dep; @event will be
+++ * sent to all listeners on this @key while the lock is being granted */
+++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
+++			u32 key, unsigned dep, int wait, void *event);
+++/* release the child lock at level @dep; this lock will then listen on its
+++ * key. If @event isn't NULL, event_cb will be called against @lck while
+++ * granting any other lock at level @dep with the same key */
+++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
+++/* stop listening on child lock at level @dep */
+++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
+++/* for debug */
+++void htree_lock_stat_print(int depth);
+++void htree_lock_stat_reset(void);
+++
+++#define htree_lock(lck, lh, mode)	htree_lock_try(lck, lh, mode, 1)
+++#define htree_change_lock(lck, mode)	htree_change_lock_try(lck, mode, 1)
+++
+++#define htree_lock_mode(lck)		((lck)->lk_mode)
+++
+++#define htree_node_lock(lck, mode, key, dep)	\
+++	htree_node_lock_try(lck, mode, key, dep, 1, NULL)
+++/* this is only safe in the thread context of the lock owner */
+++#define htree_node_is_granted(lck, dep)		\
+++	((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
+++	 (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
+++/* this is only safe in the thread context of the lock owner */
+++#define htree_node_is_listening(lck, dep)	\
+++	((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
+++
+++#endif
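
As a quick illustration of the interface declared above, here is a minimal usage sketch. It is editorial, not part of the patchset: it assumes a two-level tree, takes PW access on an arbitrary child key (42), and reduces error handling to NULL checks.

	/* one lock head per shared resource, e.g. one per directory */
	struct htree_lock_head *lhead;
	/* one lock handle per thread, here with no private bytes */
	struct htree_lock *lck;

	lhead = htree_lock_head_alloc(2, HTREE_HBITS_DEF, 0);
	lck = htree_lock_alloc(2, 0);
	if (lhead != NULL && lck != NULL) {
		htree_lock(lck, lhead, HTREE_LOCK_PW);	/* tree-level lock */
		/* child lock on key 42 at level 0: wait == 1, no event */
		if (htree_node_lock_try(lck, HTREE_LOCK_PW, 42, 0, 1, NULL)) {
			/* ... modify whatever key 42 covers ... */
			htree_node_unlock(lck, 0, NULL);
		}
		htree_unlock(lck);
	}
	if (lck != NULL)
		htree_lock_free(lck);
	if (lhead != NULL)
		htree_lock_head_free(lhead);

Note that htree_unlock() releases any child locks still held, so the explicit htree_node_unlock() is only needed when a child lock should be dropped early.
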
++Index: linux-source-2.6.32/fs/ext4/htree_lock.c
++===================================================================
++--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++++ linux-source-2.6.32/fs/ext4/htree_lock.c	2012-06-28 12:11:51.977663925 +0200
++@@ -0,0 +1,880 @@
+++/*
+++ * fs/ext4/htree_lock.c
+++ *
+++ * Copyright (c) 2011, Whamcloud, Inc.
+++ *
+++ * Author: Liang Zhen <liang at whamcloud.com>
+++ */
+++#include <linux/jbd2.h>
+++#include <linux/hash.h>
+++#include <linux/module.h>
+++#include <linux/htree_lock.h>
+++
+++enum {
+++	HTREE_LOCK_BIT_EX	= (1 << HTREE_LOCK_EX),
+++	HTREE_LOCK_BIT_PW	= (1 << HTREE_LOCK_PW),
+++	HTREE_LOCK_BIT_PR	= (1 << HTREE_LOCK_PR),
+++	HTREE_LOCK_BIT_CW	= (1 << HTREE_LOCK_CW),
+++	HTREE_LOCK_BIT_CR	= (1 << HTREE_LOCK_CR),
+++};
+++
+++enum {
+++	HTREE_LOCK_COMPAT_EX	= 0,
+++	HTREE_LOCK_COMPAT_PW	= HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
+++	HTREE_LOCK_COMPAT_PR	= HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
+++	HTREE_LOCK_COMPAT_CW	= HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
+++	HTREE_LOCK_COMPAT_CR	= HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
+++				  HTREE_LOCK_BIT_PW,
+++};
+++
+++static int htree_lock_compat[] = {
+++	[HTREE_LOCK_EX]		HTREE_LOCK_COMPAT_EX,
+++	[HTREE_LOCK_PW]		HTREE_LOCK_COMPAT_PW,
+++	[HTREE_LOCK_PR]		HTREE_LOCK_COMPAT_PR,
+++	[HTREE_LOCK_CW]		HTREE_LOCK_COMPAT_CW,
+++	[HTREE_LOCK_CR]		HTREE_LOCK_COMPAT_CR,
+++};
+++
+++/* max allowed htree-lock depth.
+++ * We only need depth=3 for ext4, although users can request a higher value. */
+++#define HTREE_LOCK_DEP_MAX	16
+++
+++#ifdef HTREE_LOCK_DEBUG
+++
+++static char *hl_name[] = {
+++	[HTREE_LOCK_EX]		"EX",
+++	[HTREE_LOCK_PW]		"PW",
+++	[HTREE_LOCK_PR]		"PR",
+++	[HTREE_LOCK_CW]		"CW",
+++	[HTREE_LOCK_CR]		"CR",
+++};
+++
+++/* lock stats */
+++struct htree_lock_node_stats {
+++	unsigned long long	blocked[HTREE_LOCK_MAX];
+++	unsigned long long	granted[HTREE_LOCK_MAX];
+++	unsigned long long	retried[HTREE_LOCK_MAX];
+++	unsigned long long	events;
+++};
+++
+++struct htree_lock_stats {
+++	struct htree_lock_node_stats	nodes[HTREE_LOCK_DEP_MAX];
+++	unsigned long long	granted[HTREE_LOCK_MAX];
+++	unsigned long long	blocked[HTREE_LOCK_MAX];
+++};
+++
+++static struct htree_lock_stats hl_stats;
+++
+++void htree_lock_stat_reset(void)
+++{
+++	memset(&hl_stats, 0, sizeof(hl_stats));
+++}
+++
+++void htree_lock_stat_print(int depth)
+++{
+++	int     i;
+++	int	j;
+++
+++	printk(KERN_DEBUG "HTREE LOCK STATS:\n");
+++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
+++		printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
+++		       hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
+++	}
+++	for (i = 0; i < depth; i++) {
+++		printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
+++		for (j = 0; j < HTREE_LOCK_MAX; j++) {
+++			printk(KERN_DEBUG
+++				"[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
+++				hl_name[j], hl_stats.nodes[i].granted[j],
+++				hl_stats.nodes[i].blocked[j],
+++				hl_stats.nodes[i].retried[j]);
+++		}
+++	}
+++}
+++
+++#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
+++#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
+++#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
+++#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
+++#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
+++#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
+++
+++#else /* !DEBUG */
+++
+++void htree_lock_stat_reset(void) {}
+++void htree_lock_stat_print(int depth) {}
+++
+++#define lk_grant_inc(m)	      do {} while (0)
+++#define lk_block_inc(m)	      do {} while (0)
+++#define ln_grant_inc(d, m)    do {} while (0)
+++#define ln_block_inc(d, m)    do {} while (0)
+++#define ln_retry_inc(d, m)    do {} while (0)
+++#define ln_event_inc(d)	      do {} while (0)
+++
+++#endif /* DEBUG */
+++
+++EXPORT_SYMBOL(htree_lock_stat_reset);
+++EXPORT_SYMBOL(htree_lock_stat_print);
+++
+++#define HTREE_DEP_ROOT		  (-1)
+++
+++#define htree_spin_lock(lhead, dep)				\
+++	bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
+++#define htree_spin_unlock(lhead, dep)				\
+++	bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
+++
+++#define htree_key_event_ignore(child, ln)			\
+++	(!((child)->lc_events & (1 << (ln)->ln_mode)))
+++
+++static int
+++htree_key_list_empty(struct htree_lock_node *ln)
+++{
+++	return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
+++}
+++
+++static void
+++htree_key_list_del_init(struct htree_lock_node *ln)
+++{
+++	struct htree_lock_node *tmp = NULL;
+++
+++	if (!list_empty(&ln->ln_minor_list)) {
+++		tmp = list_entry(ln->ln_minor_list.next,
+++				 struct htree_lock_node, ln_minor_list);
+++		list_del_init(&ln->ln_minor_list);
+++	}
+++
+++	if (list_empty(&ln->ln_major_list))
+++		return;
+++
+++	if (tmp == NULL) { /* not on minor key list */
+++		list_del_init(&ln->ln_major_list);
+++	} else {
+++		BUG_ON(!list_empty(&tmp->ln_major_list));
+++		list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
+++	}
+++}
+++
+++static void
+++htree_key_list_replace_init(struct htree_lock_node *old,
+++			    struct htree_lock_node *new)
+++{
+++	if (!list_empty(&old->ln_major_list))
+++		list_replace_init(&old->ln_major_list, &new->ln_major_list);
+++
+++	if (!list_empty(&old->ln_minor_list))
+++		list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
+++}
+++
+++static void
+++htree_key_event_enqueue(struct htree_lock_child *child,
+++			struct htree_lock_node *ln, int dep, void *event)
+++{
+++	struct htree_lock_node *tmp;
+++
+++	/* NB: ALWAYS called holding lhead::lh_lock(dep) */
+++	BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
+++	if (event == NULL || htree_key_event_ignore(child, ln))
+++		return;
+++
+++	/* shouldn't be a very long list */
+++	list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
+++		if (tmp->ln_mode == HTREE_LOCK_NL) {
+++			ln_event_inc(dep);
+++			if (child->lc_callback != NULL)
+++				child->lc_callback(tmp->ln_ev_target, event);
+++		}
+++	}
+++}
+++
+++static int
+++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
+++			unsigned dep, int wait, void *event)
+++{
+++	struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
+++	struct htree_lock_node *newln = &newlk->lk_nodes[dep];
+++	struct htree_lock_node *curln = &curlk->lk_nodes[dep];
+++
+++	/* NB: ALWAYS called holding lhead::lh_lock(dep) */
+++	/* NB: we only expect PR/PW lock modes here; only these two modes are
+++	 * allowed for htree_node_lock (asserted in htree_node_lock_internal),
+++	 * NL is only used for listeners, users can't directly request NL mode */
+++	if ((curln->ln_mode == HTREE_LOCK_NL) ||
+++	    (curln->ln_mode != HTREE_LOCK_PW &&
+++	     newln->ln_mode != HTREE_LOCK_PW)) {
+++		/* no conflict, attach it on granted list of @curlk */
+++		if (curln->ln_mode != HTREE_LOCK_NL) {
+++			list_add(&newln->ln_granted_list,
+++				 &curln->ln_granted_list);
+++		} else {
+++			/* replace key owner */
+++			htree_key_list_replace_init(curln, newln);
+++		}
+++
+++		list_add(&newln->ln_alive_list, &curln->ln_alive_list);
+++		htree_key_event_enqueue(child, newln, dep, event);
+++		ln_grant_inc(dep, newln->ln_mode);
+++		return 1; /* still hold lh_lock */
+++	}
+++
+++	if (!wait) { /* can't grant and don't want to wait */
+++		ln_retry_inc(dep, newln->ln_mode);
+++		newln->ln_mode = HTREE_LOCK_INVAL;
+++		return -1; /* don't wait and just return -1 */
+++	}
+++
+++	newlk->lk_task = current;
+++	set_current_state(TASK_UNINTERRUPTIBLE);
+++	/* conflict, attach it on blocked list of curlk */
+++	list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
+++	list_add(&newln->ln_alive_list, &curln->ln_alive_list);
+++	ln_block_inc(dep, newln->ln_mode);
+++
+++	htree_spin_unlock(newlk->lk_head, dep);
+++	/* wait to be given the lock */
+++	if (newlk->lk_task != NULL)
+++		schedule();
+++	/* granted, no doubt, wake up will set me RUNNING */
+++	if (event == NULL || htree_key_event_ignore(child, newln))
+++		return 0; /* granted without lh_lock */
+++
+++	htree_spin_lock(newlk->lk_head, dep);
+++	htree_key_event_enqueue(child, newln, dep, event);
+++	return 1; /* still hold lh_lock */
+++}
+++
+++/*
+++ * get PR/PW access to a particular tree-node according to @dep and @key;
+++ * it will return -1 if @wait is false and the lock can't be granted
+++ * immediately. All listeners (HTREE_LOCK_NL) on @dep and with the same
+++ * @key will get @event if it's not NULL.
+++ * NB: ALWAYS called holding lhead::lh_lock
+++ */
+++static int
+++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
+++			 htree_lock_mode_t mode, u32 key, unsigned dep,
+++			 int wait, void *event)
+++{
+++	LIST_HEAD		(list);
+++	struct htree_lock	*tmp;
+++	struct htree_lock	*tmp2;
+++	u16			major;
+++	u16			minor;
+++	u8			reverse;
+++	u8			ma_bits;
+++	u8			mi_bits;
+++
+++	BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
+++	BUG_ON(htree_node_is_granted(lck, dep));
+++
+++	key = hash_long(key, lhead->lh_hbits);
+++
+++	mi_bits = lhead->lh_hbits >> 1;
+++	ma_bits = lhead->lh_hbits - mi_bits;
+++
+++	lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
+++	lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
+++	lck->lk_nodes[dep].ln_mode = mode;
+++
+++	/*
+++	 * The major key list is an ordered list, so searches are started
+++	 * at the end of the list that is numerically closer to major_key,
+++	 * so at most half of the list will be walked (for well-distributed
+++	 * keys). The list traversal aborts early if the expected key
+++	 * location is passed.
+++	 */
+++	reverse = (major >= (1 << (ma_bits - 1)));
+++
+++	if (reverse) {
+++		list_for_each_entry_reverse(tmp,
+++					&lhead->lh_children[dep].lc_list,
+++					lk_nodes[dep].ln_major_list) {
+++			if (tmp->lk_nodes[dep].ln_major_key == major) {
+++				goto search_minor;
+++
+++			} else if (tmp->lk_nodes[dep].ln_major_key < major) {
+++				/* attach _after_ @tmp */
+++				list_add(&lck->lk_nodes[dep].ln_major_list,
+++					 &tmp->lk_nodes[dep].ln_major_list);
+++				goto out_grant_major;
+++			}
+++		}
+++
+++		list_add(&lck->lk_nodes[dep].ln_major_list,
+++			 &lhead->lh_children[dep].lc_list);
+++		goto out_grant_major;
+++
+++	} else {
+++		list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
+++				    lk_nodes[dep].ln_major_list) {
+++			if (tmp->lk_nodes[dep].ln_major_key == major) {
+++				goto search_minor;
+++
+++			} else if (tmp->lk_nodes[dep].ln_major_key > major) {
+++				/* insert _before_ @tmp */
+++				list_add_tail(&lck->lk_nodes[dep].ln_major_list,
+++					&tmp->lk_nodes[dep].ln_major_list);
+++				goto out_grant_major;
+++			}
+++		}
+++
+++		list_add_tail(&lck->lk_nodes[dep].ln_major_list,
+++			      &lhead->lh_children[dep].lc_list);
+++		goto out_grant_major;
+++	}
+++
+++ search_minor:
+++	/*
+++	 * NB: the minor_key list doesn't have a "head", @list is just a
+++	 * temporary stub to help the list search; make sure it's removed
+++	 * after searching.
+++	 * minor_key list is an ordered list too.
+++	 */
+++	list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
+++
+++	reverse = (minor >= (1 << (mi_bits - 1)));
+++
+++	if (reverse) {
+++		list_for_each_entry_reverse(tmp2, &list,
+++					    lk_nodes[dep].ln_minor_list) {
+++			if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
+++				goto out_enqueue;
+++
+++			} else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
+++				/* attach _after_ @tmp2 */
+++				list_add(&lck->lk_nodes[dep].ln_minor_list,
+++					 &tmp2->lk_nodes[dep].ln_minor_list);
+++				goto out_grant_minor;
+++			}
+++		}
+++
+++		list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
+++
+++	} else {
+++		list_for_each_entry(tmp2, &list,
+++				    lk_nodes[dep].ln_minor_list) {
+++			if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
+++				goto out_enqueue;
+++
+++			} else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
+++				/* insert _before_ @tmp2 */
+++				list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
+++					&tmp2->lk_nodes[dep].ln_minor_list);
+++				goto out_grant_minor;
+++			}
+++		}
+++
+++		list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
+++	}
+++
+++ out_grant_minor:
+++	if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
+++		/* new lock @lck is the first one on minor_key list, which
+++		 * means it has the smallest minor_key and it should
+++		 * replace @tmp as minor_key owner */
+++		list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
+++				  &lck->lk_nodes[dep].ln_major_list);
+++	}
+++	/* remove the temporary head */
+++	list_del(&list);
+++
+++ out_grant_major:
+++	ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
+++	return 1; /* granted with holding lh_lock */
+++
+++ out_enqueue:
+++	list_del(&list); /* remove temporary head */
+++	return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
+++}
+++
+++/*
+++ * release the key of @lck at level @dep, and grant any blocked locks.
+++ * caller will still listen on @key if @event is not NULL, which means
+++ * the caller can see an event (via event_cb) whenever any lock with
+++ * the same key at level @dep is granted.
+++ * NB: ALWAYS called holding lhead::lh_lock
+++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
+++ */
+++static void
+++htree_node_unlock_internal(struct htree_lock_head *lhead,
+++			   struct htree_lock *curlk, unsigned dep, void *event)
+++{
+++	struct htree_lock_node	*curln = &curlk->lk_nodes[dep];
+++	struct htree_lock	*grtlk = NULL;
+++	struct htree_lock_node	*grtln;
+++	struct htree_lock	*poslk;
+++	struct htree_lock	*tmplk;
+++
+++	if (!htree_node_is_granted(curlk, dep))
+++		return;
+++
+++	if (!list_empty(&curln->ln_granted_list)) {
+++		/* there is another granted lock */
+++		grtlk = list_entry(curln->ln_granted_list.next,
+++				   struct htree_lock,
+++				   lk_nodes[dep].ln_granted_list);
+++		list_del_init(&curln->ln_granted_list);
+++	}
+++
+++	if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
+++		/*
+++		 * @curlk is the only granted lock, so we know:
+++		 * a) curln is key owner (attached on major/minor_list),
+++		 *    so if there is any blocked lock, it should be attached
+++		 *    on curln->ln_blocked_list
+++		 * b) we can always grant the first blocked lock
+++		 */
+++		grtlk = list_entry(curln->ln_blocked_list.next,
+++				   struct htree_lock,
+++				   lk_nodes[dep].ln_blocked_list);
+++		BUG_ON(grtlk->lk_task == NULL);
+++		wake_up_process(grtlk->lk_task);
+++	}
+++
+++	if (event != NULL &&
+++	    lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
+++		curln->ln_ev_target = event;
+++		curln->ln_mode = HTREE_LOCK_NL; /* listen! */
+++	} else {
+++		curln->ln_mode = HTREE_LOCK_INVAL;
+++	}
+++
+++	if (grtlk == NULL) { /* I must be the only one locking this key */
+++		struct htree_lock_node *tmpln;
+++
+++		BUG_ON(htree_key_list_empty(curln));
+++
+++		if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
+++			return;
+++
+++		/* not listening */
+++		if (list_empty(&curln->ln_alive_list)) { /* no more listener */
+++			htree_key_list_del_init(curln);
+++			return;
+++		}
+++
+++		tmpln = list_entry(curln->ln_alive_list.next,
+++				   struct htree_lock_node, ln_alive_list);
+++
+++		BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
+++
+++		htree_key_list_replace_init(curln, tmpln);
+++		list_del_init(&curln->ln_alive_list);
+++
+++		return;
+++	}
+++
+++	/* have a granted lock */
+++	grtln = &grtlk->lk_nodes[dep];
+++	if (!list_empty(&curln->ln_blocked_list)) {
+++		/* only key owner can be on both lists */
+++		BUG_ON(htree_key_list_empty(curln));
+++
+++		if (list_empty(&grtln->ln_blocked_list)) {
+++			list_add(&grtln->ln_blocked_list,
+++				 &curln->ln_blocked_list);
+++		}
+++		list_del_init(&curln->ln_blocked_list);
+++	}
+++	/*
+++	 * NB: this is the tricky part:
+++	 * We have only two modes for child-locks (PR and PW); also, only
+++	 * the owner of the key (attached on major/minor_list) can be on
+++	 * both blocked_list and granted_list, so @grtlk must match one
+++	 * of these two cases:
+++	 *
+++	 * a) @grtlk is taken from granted_list, which means we've granted
+++	 *    more than one lock, so @grtlk has to be PR; the first blocked
+++	 *    lock must be PW and we can't grant it at all.
+++	 *    So even if @grtlk is not the owner of the key (empty
+++	 *    blocked_list), we don't care because we can't grant any lock.
+++	 * b) we just granted a new lock taken from the head of the blocked
+++	 *    list; it should be the first granted lock, and it should
+++	 *    be the first one linked on blocked_list.
+++	 *
+++	 * Either way, we get the correct result by iterating blocked_list
+++	 * of @grtlk, without having to work out the owner of the
+++	 * current key.
+++	 */
+++	list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
+++				 lk_nodes[dep].ln_blocked_list) {
+++		if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
+++		    poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
+++			break;
+++		/* grant all readers */
+++		list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
+++		list_add(&poslk->lk_nodes[dep].ln_granted_list,
+++			 &grtln->ln_granted_list);
+++
+++		BUG_ON(poslk->lk_task == NULL);
+++		wake_up_process(poslk->lk_task);
+++	}
+++
+++	/* if @curln is the owner of this key, replace it with @grtln */
+++	if (!htree_key_list_empty(curln))
+++		htree_key_list_replace_init(curln, grtln);
+++
+++	if (curln->ln_mode == HTREE_LOCK_INVAL)
+++		list_del_init(&curln->ln_alive_list);
+++}
+++
+++/*
+++ * it's just a wrapper of htree_node_lock_internal; it returns 1 on grant
+++ * and 0 only if @wait is false and the lock can't be granted immediately
+++ */
+++int
+++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
+++		    u32 key, unsigned dep, int wait, void *event)
+++{
+++	struct htree_lock_head *lhead = lck->lk_head;
+++	int rc;
+++
+++	BUG_ON(dep >= lck->lk_depth);
+++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
+++
+++	htree_spin_lock(lhead, dep);
+++	rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
+++	if (rc != 0)
+++		htree_spin_unlock(lhead, dep);
+++	return rc >= 0;
+++}
+++EXPORT_SYMBOL(htree_node_lock_try);
+++
+++/* it's a wrapper of htree_node_unlock_internal */
+++void
+++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
+++{
+++	struct htree_lock_head *lhead = lck->lk_head;
+++
+++	BUG_ON(dep >= lck->lk_depth);
+++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
+++
+++	htree_spin_lock(lhead, dep);
+++	htree_node_unlock_internal(lhead, lck, dep, event);
+++	htree_spin_unlock(lhead, dep);
+++}
+++EXPORT_SYMBOL(htree_node_unlock);
+++
+++/* stop listening on child-lock level @dep */
+++void
+++htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
+++{
+++	struct htree_lock_node *ln = &lck->lk_nodes[dep];
+++	struct htree_lock_node *tmp;
+++
+++	BUG_ON(htree_node_is_granted(lck, dep));
+++	BUG_ON(!list_empty(&ln->ln_blocked_list));
+++	BUG_ON(!list_empty(&ln->ln_granted_list));
+++
+++	if (!htree_node_is_listening(lck, dep))
+++		return;
+++
+++	htree_spin_lock(lck->lk_head, dep);
+++	ln->ln_mode = HTREE_LOCK_INVAL;
+++	ln->ln_ev_target = NULL;
+++
+++	if (htree_key_list_empty(ln)) { /* not owner */
+++		list_del_init(&ln->ln_alive_list);
+++		goto out;
+++	}
+++
+++	/* I'm the owner... */
+++	if (list_empty(&ln->ln_alive_list)) { /* no more listener */
+++		htree_key_list_del_init(ln);
+++		goto out;
+++	}
+++
+++	tmp = list_entry(ln->ln_alive_list.next,
+++			 struct htree_lock_node, ln_alive_list);
+++
+++	BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
+++	htree_key_list_replace_init(ln, tmp);
+++	list_del_init(&ln->ln_alive_list);
+++ out:
+++	htree_spin_unlock(lck->lk_head, dep);
+++}
+++EXPORT_SYMBOL(htree_node_stop_listen);
+++
+++/* release all child-locks if we have any */
+++static void
+++htree_node_release_all(struct htree_lock *lck)
+++{
+++	int	i;
+++
+++	for (i = 0; i < lck->lk_depth; i++) {
+++		if (htree_node_is_granted(lck, i))
+++			htree_node_unlock(lck, i, NULL);
+++		else if (htree_node_is_listening(lck, i))
+++			htree_node_stop_listen(lck, i);
+++	}
+++}
+++
+++/*
+++ * obtain the htree lock; it may block inside if there's a conflict
+++ * with any granted or blocked lock and @wait is true.
+++ * NB: ALWAYS called holding lhead::lh_lock
+++ */
+++static int
+++htree_lock_internal(struct htree_lock *lck, int wait)
+++{
+++	struct htree_lock_head *lhead = lck->lk_head;
+++	int	granted = 0;
+++	int	blocked = 0;
+++	int	i;
+++
+++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
+++		if (lhead->lh_ngranted[i] != 0)
+++			granted |= 1 << i;
+++		if (lhead->lh_nblocked[i] != 0)
+++			blocked |= 1 << i;
+++	}
+++	if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
+++	    (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
+++		/* block the current lock even if it only conflicts with
+++		 * another blocked lock, so locks like EX won't starve */
+++		if (!wait)
+++			return -1;
+++		lhead->lh_nblocked[lck->lk_mode]++;
+++		lk_block_inc(lck->lk_mode);
+++
+++		lck->lk_task = current;
+++		list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+++
+++		set_current_state(TASK_UNINTERRUPTIBLE);
+++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+++		/* wait to be given the lock */
+++		if (lck->lk_task != NULL)
+++			schedule();
+++		/* granted, no doubt. wake up will set me RUNNING */
+++		return 0; /* without lh_lock */
+++	}
+++	lhead->lh_ngranted[lck->lk_mode]++;
+++	lk_grant_inc(lck->lk_mode);
+++	return 1;
+++}
+++
+++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
+++static void
+++htree_unlock_internal(struct htree_lock *lck)
+++{
+++	struct htree_lock_head *lhead = lck->lk_head;
+++	struct htree_lock *tmp;
+++	struct htree_lock *tmp2;
+++	int granted = 0;
+++	int i;
+++
+++	BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
+++
+++	lhead->lh_ngranted[lck->lk_mode]--;
+++	lck->lk_mode = HTREE_LOCK_INVAL;
+++
+++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
+++		if (lhead->lh_ngranted[i] != 0)
+++			granted |= 1 << i;
+++	}
+++	list_for_each_entry_safe(tmp, tmp2,
+++				 &lhead->lh_blocked_list, lk_blocked_list) {
+++		/* conflict with any granted lock? */
+++		if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
+++			break;
+++
+++		list_del_init(&tmp->lk_blocked_list);
+++
+++		BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
+++
+++		lhead->lh_nblocked[tmp->lk_mode]--;
+++		lhead->lh_ngranted[tmp->lk_mode]++;
+++		granted |= 1 << tmp->lk_mode;
+++
+++		BUG_ON(tmp->lk_task == NULL);
+++		wake_up_process(tmp->lk_task);
+++	}
+++}
+++
+++/* it's a wrapper of htree_lock_internal and the exported interface.
+++ * It always returns 1 with the lock granted if @wait is true; it can return
+++ * 0 if @wait is false and the locking request can't be granted immediately */
+++int
+++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
+++	       htree_lock_mode_t mode, int wait)
+++{
+++	int	rc;
+++
+++	BUG_ON(lck->lk_depth > lhead->lh_depth);
+++	BUG_ON(lck->lk_head != NULL);
+++	BUG_ON(lck->lk_task != NULL);
+++
+++	lck->lk_head = lhead;
+++	lck->lk_mode = mode;
+++
+++	htree_spin_lock(lhead, HTREE_DEP_ROOT);
+++	rc = htree_lock_internal(lck, wait);
+++	if (rc != 0)
+++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+++	return rc >= 0;
+++}
+++EXPORT_SYMBOL(htree_lock_try);
+++
+++/* it's a wrapper of htree_unlock_internal and the exported interface.
+++ * It will release all htree_node_locks and the htree_lock */
+++void
+++htree_unlock(struct htree_lock *lck)
+++{
+++	BUG_ON(lck->lk_head == NULL);
+++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
+++
+++	htree_node_release_all(lck);
+++
+++	htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
+++	htree_unlock_internal(lck);
+++	htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
+++	lck->lk_head = NULL;
+++	lck->lk_task = NULL;
+++}
+++EXPORT_SYMBOL(htree_unlock);
+++
+++/* change lock mode */
+++void
+++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
+++{
+++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
+++	lck->lk_mode = mode;
+++}
+++EXPORT_SYMBOL(htree_change_mode);
+++
+++/* release the htree lock, then lock it again with a new mode.
+++ * This function will first release all htree_node_locks and the htree_lock,
+++ * then try to regain the htree_lock with the new @mode.
+++ * It always returns 1 with the lock granted if @wait is true; it can return
+++ * 0 if @wait is false and the locking request can't be granted immediately */
+++int
+++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
+++{
+++	struct htree_lock_head *lhead = lck->lk_head;
+++	int rc;
+++
+++	BUG_ON(lhead == NULL);
+++	BUG_ON(lck->lk_mode == mode);
+++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
+++
+++	htree_node_release_all(lck);
+++
+++	htree_spin_lock(lhead, HTREE_DEP_ROOT);
+++	htree_unlock_internal(lck);
+++	lck->lk_mode = mode;
+++	rc = htree_lock_internal(lck, wait);
+++	if (rc != 0)
+++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+++	return rc >= 0;
+++}
+++EXPORT_SYMBOL(htree_change_lock_try);
+++
+++/* create a htree_lock head with @depth levels (number of child-locks);
+++ * it is a per-resource structure */
+++struct htree_lock_head *
+++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
+++{
+++	struct htree_lock_head *lhead;
+++	int  i;
+++
+++	if (depth > HTREE_LOCK_DEP_MAX) {
+++		printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
+++			depth, HTREE_LOCK_DEP_MAX);
+++		return NULL;
+++	}
+++
+++	lhead = kzalloc(offsetof(struct htree_lock_head,
+++				 lh_children[depth]) + priv, GFP_NOFS);
+++	if (lhead == NULL)
+++		return NULL;
+++
+++	if (hbits < HTREE_HBITS_MIN)
+++		lhead->lh_hbits = HTREE_HBITS_MIN;
+++	else if (hbits > HTREE_HBITS_MAX)
+++		lhead->lh_hbits = HTREE_HBITS_MAX;
+++	else
+++		lhead->lh_hbits = hbits;
+++
+++	lhead->lh_lock = 0;
+++	lhead->lh_depth = depth;
+++	INIT_LIST_HEAD(&lhead->lh_blocked_list);
+++	if (priv > 0) {
+++		lhead->lh_private = (void *)lhead +
+++			offsetof(struct htree_lock_head, lh_children[depth]);
+++	}
+++
+++	for (i = 0; i < depth; i++) {
+++		INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
+++		lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
+++	}
+++	return lhead;
+++}
+++EXPORT_SYMBOL(htree_lock_head_alloc);
+++
+++/* free the htree_lock head */
+++void
+++htree_lock_head_free(struct htree_lock_head *lhead)
+++{
+++	int     i;
+++
+++	BUG_ON(!list_empty(&lhead->lh_blocked_list));
+++	for (i = 0; i < lhead->lh_depth; i++)
+++		BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
+++	kfree(lhead);
+++}
+++EXPORT_SYMBOL(htree_lock_head_free);
+++
+++/* register event callback for @events of child-lock at level @dep */
+++void
+++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
+++			unsigned events, htree_event_cb_t callback)
+++{
+++	BUG_ON(lhead->lh_depth <= dep);
+++	lhead->lh_children[dep].lc_events = events;
+++	lhead->lh_children[dep].lc_callback = callback;
+++}
+++EXPORT_SYMBOL(htree_lock_event_attach);
+++
+++/* allocate a htree_lock, which is a per-thread structure; @pbytes is
+++ * extra bytes of private data for the caller */
+++struct htree_lock *
+++htree_lock_alloc(unsigned depth, unsigned pbytes)
+++{
+++	struct htree_lock *lck;
+++	int i = offsetof(struct htree_lock, lk_nodes[depth]);
+++
+++	if (depth > HTREE_LOCK_DEP_MAX) {
+++		printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
+++			depth, HTREE_LOCK_DEP_MAX);
+++		return NULL;
+++	}
+++	lck = kzalloc(i + pbytes, GFP_NOFS);
+++	if (lck == NULL)
+++		return NULL;
+++
+++	if (pbytes != 0)
+++		lck->lk_private = (void *)lck + i;
+++	lck->lk_mode = HTREE_LOCK_INVAL;
+++	lck->lk_depth = depth;
+++	INIT_LIST_HEAD(&lck->lk_blocked_list);
+++
+++	for (i = 0; i < depth; i++) {
+++		struct htree_lock_node *node = &lck->lk_nodes[i];
+++
+++		node->ln_mode = HTREE_LOCK_INVAL;
+++		INIT_LIST_HEAD(&node->ln_major_list);
+++		INIT_LIST_HEAD(&node->ln_minor_list);
+++		INIT_LIST_HEAD(&node->ln_alive_list);
+++		INIT_LIST_HEAD(&node->ln_blocked_list);
+++		INIT_LIST_HEAD(&node->ln_granted_list);
+++	}
+++
+++	return lck;
+++}
+++EXPORT_SYMBOL(htree_lock_alloc);
+++
+++/* free a htree_lock handle */
+++void
+++htree_lock_free(struct htree_lock *lck)
+++{
+++	BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
+++	kfree(lck);
+++}
+++EXPORT_SYMBOL(htree_lock_free);
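
The grant decision in htree_lock_internal() above reduces to a single bitmask test: a new mode is grantable only if every currently granted (and blocked) mode appears in its compatibility row. A standalone sketch of that test, for illustration only:

	/* @granted is the bitmask of modes with lh_ngranted[i] != 0 */
	static int can_grant(htree_lock_mode_t mode, int granted)
	{
		return (htree_lock_compat[mode] & granted) == granted;
	}

	/* with one PR lock held, granted == HTREE_LOCK_BIT_PR, so:
	 *   can_grant(HTREE_LOCK_PR, granted) == 1  (PR is self-compatible)
	 *   can_grant(HTREE_LOCK_EX, granted) == 0  (HTREE_LOCK_COMPAT_EX is 0)
	 */

Because the same test is also applied against the blocked bitmask, a blocked EX request makes later PR/CR requests queue behind it instead of overtaking it, which is how EX avoids starvation.
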
++Index: linux-source-2.6.32/fs/ext4/ext4.h
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:34.597665360 +0200
+++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:51.977663925 +0200
++@@ -28,6 +28,7 @@
++ #include <linux/mutex.h>
++ #include <linux/timer.h>
++ #include <linux/wait.h>
+++#include <linux/htree_lock.h>
++ #include <linux/blockgroup_lock.h>
++ #include <linux/percpu_counter.h>
++ #ifdef __KERNEL__
++@@ -1269,6 +1270,7 @@
++ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
++ #define EXT4_FEATURE_INCOMPAT_EA_INODE		0x0400
++ #define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
+++#define EXT4_FEATURE_INCOMPAT_LARGEDIR		0x4000
++ 
++ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
++ #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
++@@ -1279,7 +1281,8 @@
++ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
++ 					 EXT4_FEATURE_INCOMPAT_EA_INODE| \
++ 					 EXT4_FEATURE_INCOMPAT_MMP| \
++-					 EXT4_FEATURE_INCOMPAT_DIRDATA)
+++					 EXT4_FEATURE_INCOMPAT_DIRDATA| \
+++					 EXT4_FEATURE_INCOMPAT_LARGEDIR)
++ 
++ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
++ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
++@@ -1496,6 +1499,76 @@
++  */
++ #define ERR_BAD_DX_DIR	-75000
++ 
+++/* htree levels for ext4 */
+++#define EXT4_HTREE_LEVEL_COMPAT 2
+++#define EXT4_HTREE_LEVEL	3
+++
+++static inline int
+++ext4_dir_htree_level(struct super_block *sb)
+++{
+++	return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
+++		EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
+++}
+++
+++/* assume name-hash is protected by upper layer */
+++#define EXT4_HTREE_LOCK_HASH	0
+++
+++enum ext4_pdo_lk_types {
+++#if EXT4_HTREE_LOCK_HASH
+++	EXT4_LK_HASH,
+++#endif
+++	EXT4_LK_DX,		/* index block */
+++	EXT4_LK_DE,		/* directory entry block */
+++	EXT4_LK_SPIN,		/* spinlock */
+++	EXT4_LK_MAX,
+++};
+++
+++/* read-only bit */
+++#define EXT4_LB_RO(b)		(1 << (b))
+++/* read + write, high bits for writer */
+++#define EXT4_LB_RW(b)		((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
+++
+++enum ext4_pdo_lock_bits {
+++	/* DX lock bits */
+++	EXT4_LB_DX_RO		= EXT4_LB_RO(EXT4_LK_DX),
+++	EXT4_LB_DX		= EXT4_LB_RW(EXT4_LK_DX),
+++	/* DE lock bits */
+++	EXT4_LB_DE_RO		= EXT4_LB_RO(EXT4_LK_DE),
+++	EXT4_LB_DE		= EXT4_LB_RW(EXT4_LK_DE),
+++	/* DX spinlock bits */
+++	EXT4_LB_SPIN_RO		= EXT4_LB_RO(EXT4_LK_SPIN),
+++	EXT4_LB_SPIN		= EXT4_LB_RW(EXT4_LK_SPIN),
+++	/* accurate searching */
+++	EXT4_LB_EXACT		= EXT4_LB_RO(EXT4_LK_MAX << 1),
+++};
+++
+++enum ext4_pdo_lock_opc {
+++	/* external */
+++	EXT4_HLOCK_READDIR	= (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
+++	EXT4_HLOCK_LOOKUP	= (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
+++				   EXT4_LB_EXACT),
+++	EXT4_HLOCK_DEL		= (EXT4_LB_DE | EXT4_LB_SPIN_RO |
+++				   EXT4_LB_EXACT),
+++	EXT4_HLOCK_ADD		= (EXT4_LB_DE | EXT4_LB_SPIN_RO),
+++
+++	/* internal */
+++	EXT4_HLOCK_LOOKUP_SAFE	= (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
+++				   EXT4_LB_EXACT),
+++	EXT4_HLOCK_DEL_SAFE	= (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
+++	EXT4_HLOCK_SPLIT	= (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
+++};
+++
+++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
+++#define ext4_htree_lock_head_free(lhead)	htree_lock_head_free(lhead)
+++
+++extern struct htree_lock *ext4_htree_lock_alloc(void);
+++#define ext4_htree_lock_free(lck)		htree_lock_free(lck)
+++
+++extern void ext4_htree_lock(struct htree_lock *lck,
+++			    struct htree_lock_head *lhead,
+++			    struct inode *dir, unsigned flags);
+++#define ext4_htree_unlock(lck)                  htree_unlock(lck)
+++
++ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
++ 			ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
++ 
++@@ -1735,14 +1808,16 @@
++ extern struct inode *ext4_create_inode(handle_t *handle,
++ 				       struct inode * dir, int mode);
++ extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++-			  struct inode *inode);
+++			  struct inode *inode, struct htree_lock *lck);
++ extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
++ 			     struct ext4_dir_entry_2 * de_del,
++ 			     struct buffer_head * bh);
++ extern struct buffer_head * ext4_find_entry(struct inode *dir,
++ 					    const struct qstr *d_name,
++-					    struct ext4_dir_entry_2 ** res_dir);
++-#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
+++					    struct ext4_dir_entry_2 **res_dir,
+++					    struct htree_lock *lck);
+++#define ll_ext4_find_entry(inode, dentry, res_dir, lck) \
+++	ext4_find_entry(inode, &(dentry)->d_name, res_dir, lck)
++ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++ 			       struct inode *inode, const void *, const void *);
++ extern struct buffer_head *ext4_append(handle_t *handle,
++@@ -1852,13 +1927,15 @@
++ 	es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
++ }
++ 
++-static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
+++static inline loff_t ext4_isize(struct super_block *sb,
+++				struct ext4_inode *raw_inode)
++ {
++-	if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
+++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
+++	    S_ISREG(le16_to_cpu(raw_inode->i_mode)))
++ 		return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
++ 			le32_to_cpu(raw_inode->i_size_lo);
++-	else
++-		return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
+++
+++	return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
++ }
++ 
++ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
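
To make the EXT4_LB_* bit layout above concrete, here is a small user-space sketch that mirrors the macros (assuming EXT4_HTREE_LOCK_HASH stays 0, so DX/DE/SPIN are levels 0/1/2 and EXT4_LK_MAX is 3):

	#include <stdio.h>

	enum { LK_DX, LK_DE, LK_SPIN, LK_MAX };	/* mirrors ext4_pdo_lk_types */
	#define LB_RO(b)	(1 << (b))
	#define LB_RW(b)	((1 << (b)) | (1 << (LK_MAX + (b))))

	int main(void)
	{
		/* low LK_MAX bits are reader bits, next LK_MAX are writer bits */
		printf("DX_RO=0x%02x DX=0x%02x\n", LB_RO(LK_DX), LB_RW(LK_DX));
		printf("DE_RO=0x%02x DE=0x%02x\n", LB_RO(LK_DE), LB_RW(LK_DE));
		/* prints DX_RO=0x01 DX=0x09 and DE_RO=0x02 DE=0x12;
		 * namei.c recovers the level from a mask with ffz(~mask),
		 * e.g. ffz(~0x12) == 1 == LK_DE */
		return 0;
	}

EXT4_LB_EXACT is EXT4_LB_RO(EXT4_LK_MAX << 1), i.e. bit 6, which sits above both the reader and writer groups, so it never collides with a per-level lock bit.
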
++Index: linux-source-2.6.32/fs/ext4/namei.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:11:31.057668333 +0200
+++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:51.981664959 +0200
++@@ -176,7 +176,7 @@
++ 				 struct inode *dir,
++ 				 struct dx_hash_info *hinfo,
++ 				 struct dx_frame *frame,
++-				 int *err);
+++				 struct htree_lock *lck, int *err);
++ static void dx_release(struct dx_frame *frames);
++ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
++ 		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
++@@ -189,13 +189,13 @@
++ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
++ 				 struct dx_frame *frame,
++ 				 struct dx_frame *frames,
++-				 __u32 *start_hash);
+++				 __u32 *start_hash, struct htree_lock *lck);
++ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
++ 		const struct qstr *d_name,
++ 		struct ext4_dir_entry_2 **res_dir,
++-		int *err);
+++		struct htree_lock *lck, int *err);
++ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
++-			     struct inode *inode);
+++			     struct inode *inode, struct htree_lock *lck);
++ 
++ unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
++ {
++@@ -249,7 +249,7 @@
++ 
++ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
++ {
++-	return le32_to_cpu(entry->block) & 0x00ffffff;
+++	return le32_to_cpu(entry->block) & 0x0fffffff;
++ }
++ 
++ static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
++@@ -392,6 +392,223 @@
++ }
++ #endif /* DX_DEBUG */
++ 
+++/* private data for htree_lock */
+++struct ext4_dir_lock_data {
+++	unsigned		ld_flags;  /* bits-map for lock types */
+++	unsigned		ld_count;  /* # entries of the last DX block */
+++	struct dx_entry		ld_at_entry; /* copy of leaf dx_entry */
+++	struct dx_entry		*ld_at;	   /* position of leaf dx_entry */
+++};
+++
+++#define ext4_htree_lock_data(l)	((struct ext4_dir_lock_data *)(l)->lk_private)
+++
+++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
+++#define EXT4_HTREE_NODE_CHANGED	(0xcafeULL << 32)
+++
+++static void ext4_htree_event_cb(void *target, void *event)
+++{
+++	u64 *block = (u64 *)target;
+++
+++	if (*block == dx_get_block((struct dx_entry *)event))
+++		*block = EXT4_HTREE_NODE_CHANGED;
+++}
+++
+++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
+++{
+++	struct htree_lock_head *lhead;
+++
+++	lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
+++	if (lhead != NULL) {
+++		htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
+++					ext4_htree_event_cb);
+++	}
+++	return lhead;
+++}
+++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
+++
+++struct htree_lock *ext4_htree_lock_alloc(void)
+++{
+++	return htree_lock_alloc(EXT4_LK_MAX,
+++				sizeof(struct ext4_dir_lock_data));
+++}
+++EXPORT_SYMBOL(ext4_htree_lock_alloc);
+++
+++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
+++{
+++	switch (flags) {
+++	default: /* 0 or unknown flags require EX lock */
+++		return HTREE_LOCK_EX;
+++	case EXT4_HLOCK_READDIR:
+++		return HTREE_LOCK_PR;
+++	case EXT4_HLOCK_LOOKUP:
+++		return HTREE_LOCK_CR;
+++	case EXT4_HLOCK_DEL:
+++	case EXT4_HLOCK_ADD:
+++		return HTREE_LOCK_CW;
+++	}
+++}
+++
+++/* return PR for read-only operations, otherwise return EX */
+++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
+++{
+++	int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
+++
+++	/* 0 requires EX lock */
+++	return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
+++}
+++
+++static int ext4_htree_safe_locked(struct htree_lock *lck)
+++{
+++	int writer;
+++
+++	if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
+++		return 1;
+++
+++	writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
+++		 EXT4_LB_DE;
+++	if (writer) /* all readers & writers are excluded? */
+++		return lck->lk_mode == HTREE_LOCK_EX;
+++
+++	/* all writers are excluded? */
+++	return lck->lk_mode == HTREE_LOCK_PR ||
+++	       lck->lk_mode == HTREE_LOCK_PW ||
+++	       lck->lk_mode == HTREE_LOCK_EX;
+++}
+++
+++/* relock htree_lock with EX mode if it's a change operation, otherwise
+++ * relock it with PR mode. It's a no-op if PDO is disabled. */
+++static void ext4_htree_safe_relock(struct htree_lock *lck)
+++{
+++	if (!ext4_htree_safe_locked(lck)) {
+++		unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
+++
+++		htree_change_lock(lck, ext4_htree_safe_mode(flags));
+++	}
+++}
+++
+++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
+++		     struct inode *dir, unsigned flags)
+++{
+++	htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
+++					      ext4_htree_safe_mode(flags);
+++
+++	ext4_htree_lock_data(lck)->ld_flags = flags;
+++	htree_lock(lck, lhead, mode);
+++	if (!is_dx(dir))
+++		ext4_htree_safe_relock(lck); /* make sure it's safe locked */
+++}
+++EXPORT_SYMBOL(ext4_htree_lock);
+++
+++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
+++				unsigned lmask, int wait, void *ev)
+++{
+++	u32	key = (at == NULL) ? 0 : dx_get_block(at);
+++	u32	mode;
+++
+++	/* NOOP if htree is well protected or caller doesn't require the lock */
+++	if (ext4_htree_safe_locked(lck) ||
+++	   !(ext4_htree_lock_data(lck)->ld_flags & lmask))
+++		return 1;
+++
+++	mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
+++		HTREE_LOCK_PW : HTREE_LOCK_PR;
+++	while (1) {
+++		if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
+++			return 1;
+++		if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
+++			return 0;
+++		cpu_relax(); /* spin until granted */
+++	}
+++}
+++
+++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
+++{
+++	return ext4_htree_safe_locked(lck) ||
+++	       htree_node_is_granted(lck, ffz(~lmask));
+++}
+++
+++static void ext4_htree_node_unlock(struct htree_lock *lck,
+++				   unsigned lmask, void *buf)
+++{
+++	/* NB: it's safe to call multiple times, even if it's not locked */
+++	if (!ext4_htree_safe_locked(lck) &&
+++	     htree_node_is_granted(lck, ffz(~lmask)))
+++		htree_node_unlock(lck, ffz(~lmask), buf);
+++}
+++
+++#define ext4_htree_dx_lock(lck, key)		\
+++	ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
+++#define ext4_htree_dx_lock_try(lck, key)	\
+++	ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
+++#define ext4_htree_dx_unlock(lck)		\
+++	ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
+++#define ext4_htree_dx_locked(lck)		\
+++	ext4_htree_node_locked(lck, EXT4_LB_DX)
+++
+++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
+++{
+++	struct ext4_dir_lock_data *ld;
+++
+++	if (ext4_htree_safe_locked(lck))
+++		return;
+++
+++	ld = ext4_htree_lock_data(lck);
+++	switch (ld->ld_flags) {
+++	default:
+++		return;
+++	case EXT4_HLOCK_LOOKUP:
+++		ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
+++		return;
+++	case EXT4_HLOCK_DEL:
+++		ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
+++		return;
+++	case EXT4_HLOCK_ADD:
+++		ld->ld_flags = EXT4_HLOCK_SPLIT;
+++		return;
+++	}
+++}
+++
+++#define ext4_htree_de_lock(lck, key)		\
+++	ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
+++#define ext4_htree_de_unlock(lck)		\
+++	ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
+++
+++#define ext4_htree_spin_lock(lck, key, event)	\
+++	ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
+++#define ext4_htree_spin_unlock(lck)		\
+++	ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
+++#define ext4_htree_spin_unlock_listen(lck, p)	\
+++	ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
+++
+++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
+++{
+++	if (!ext4_htree_safe_locked(lck) &&
+++	    htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
+++		htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
+++}
+++
+++enum {
+++	DX_HASH_COL_IGNORE,	/* ignore collision while probing frames */
+++	DX_HASH_COL_YES,	/* there is collision and it does matter */
+++	DX_HASH_COL_NO,		/* there is no collision */
+++};
+++
+++static int dx_probe_hash_collision(struct htree_lock *lck,
+++				   struct dx_entry *entries,
+++				   struct dx_entry *at, u32 hash)
+++{
+++	if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
+++		return DX_HASH_COL_IGNORE; /* don't care about collision */
+++
+++	} else if (at == entries + dx_get_count(entries) - 1) {
+++		return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
+++
+++	} else { /* hash collision? */
+++		return ((dx_get_hash(at + 1) & ~1) == hash) ?
+++			DX_HASH_COL_YES : DX_HASH_COL_NO;
+++	}
+++}
+++
++ /*
++  * Probe for a directory leaf block to search.
++  *
++@@ -403,16 +620,17 @@
++  */
++ static struct dx_frame *
++ dx_probe(const struct qstr *d_name, struct inode *dir,
++-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
+++	 struct dx_hash_info *hinfo, struct dx_frame *frame_in,
+++	 struct htree_lock *lck, int *err)
++ {
++ 	unsigned count, indirect;
++-	struct dx_entry *at, *entries, *p, *q, *m;
+++	struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
++ 	struct dx_root_info * info;
++ 	struct buffer_head *bh;
++ 	struct dx_frame *frame = frame_in;
++ 	u32 hash;
++ 
++-	frame->bh = NULL;
+++	memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
++ 	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
++ 		goto fail;
++ 	info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
++@@ -443,10 +661,16 @@
++ 		goto fail;
++ 	}
++ 
++-	if ((indirect = info->indirect_levels) > 1) {
+++	indirect = info->indirect_levels;
+++	if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
++ 		ext4_warning(dir->i_sb, __func__,
++-			     "Unimplemented inode hash depth: %#06x",
++-			     info->indirect_levels);
+++		"Directory (ino: %lu) htree depth %#06x exceed "
+++		"supported value", dir->i_ino,
+++		ext4_dir_htree_level(dir->i_sb));
+++		if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
+++			ext4_warning(dir->i_sb, __func__, "Enable large directory "
+++			"feature to access it");
+++		}
++ 		brelse(bh);
++ 		*err = ERR_BAD_DX_DIR;
++ 		goto fail;
++@@ -467,8 +691,15 @@
++ 	dxtrace(printk("Look up %x", hash));
++ 	while (1)
++ 	{
+++		if (indirect == 0) { /* the last index level */
+++			/* NB: ext4_htree_dx_lock() could be a no-op if the
+++			 * DX-lock flag is not set for the current operation */
+++			ext4_htree_dx_lock(lck, dx);
+++			ext4_htree_spin_lock(lck, dx, NULL);
+++		}
++ 		count = dx_get_count(entries);
++-		if (!count || count > dx_get_limit(entries)) {
+++		if (count == 0 || count > dx_get_limit(entries)) {
+++			ext4_htree_spin_unlock(lck); /* release spin */
++ 			ext4_warning(dir->i_sb, __func__,
++ 				     "dx entry: no count or count > limit");
++ 			brelse(bh);
++@@ -509,9 +740,73 @@
++ 		frame->bh = bh;
++ 		frame->entries = entries;
++ 		frame->at = at;
++-		if (!indirect--) return frame;
+++
+++		if (indirect == 0) { /* the last index level */
+++			struct ext4_dir_lock_data *ld;
+++			u64 myblock;
+++
+++			/* By default we only lock the DE-block; however, we
+++			 * will also lock the last level DX-block if:
+++			 * a) there is a hash collision
+++			 *    we will set the DX-lock flag (a few lines below)
+++			 *    and retry to lock the DX-block
+++			 *    see details in dx_probe_hash_collision()
+++			 * b) it's a retry from splitting
+++			 *    we need to lock the last level DX-block so nobody
+++			 *    else can split any leaf blocks under the same
+++			 *    DX-block, see details in ext4_dx_add_entry()
+++			 */
+++			if (ext4_htree_dx_locked(lck)) {
+++				/* DX-block is locked, just lock DE-block
+++				 * and return */
+++				ext4_htree_spin_unlock(lck);
+++				if (!ext4_htree_safe_locked(lck))
+++					ext4_htree_de_lock(lck, frame->at);
+++				return frame;
+++			}
+++			/* it's pdirop and no DX lock */
+++			if (dx_probe_hash_collision(lck, entries, at, hash) ==
+++			    DX_HASH_COL_YES) {
+++				/* found hash collision, set DX-lock flag
+++				 * and retry to obtain DX-lock */
+++				ext4_htree_spin_unlock(lck);
+++				ext4_htree_dx_need_lock(lck);
+++				continue;
+++			}
+++			ld = ext4_htree_lock_data(lck);
+++			/* because I don't hold the DX lock, @at can't be
+++			 * trusted after I release the spinlock, so save it */
+++			ld->ld_at = at;
+++			ld->ld_at_entry = *at;
+++			ld->ld_count = dx_get_count(entries);
+++
+++			frame->at = &ld->ld_at_entry;
+++			myblock = dx_get_block(at);
+++
+++			/* NB: lock ordering */
+++			ext4_htree_spin_unlock_listen(lck, &myblock);
+++			/* another thread can split this DE-block because:
+++			 * a) I don't hold the lock for the DE-block yet
+++			 * b) I released the spinlock on the DX-block
+++			 * if that happens I can detect it by listening for
+++			 * the split event on this DE-block */
+++			ext4_htree_de_lock(lck, frame->at);
+++			ext4_htree_spin_stop_listen(lck);
+++
+++			if (myblock == EXT4_HTREE_NODE_CHANGED) {
+++				/* someone split this DE-block before
+++				 * I locked it, so I need to retry and lock
+++				 * the valid DE-block */
+++				ext4_htree_de_unlock(lck);
+++				continue;
+++			}
+++			return frame;
+++		}
+++		dx = at;
+++		indirect--;
++ 		if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
++ 			goto fail2;
+++
++ 		at = entries = ((struct dx_node *) bh->b_data)->entries;
++ 		if (dx_get_limit(entries) != dx_node_limit (dir)) {
++ 			ext4_warning(dir->i_sb, __func__,
++@@ -539,13 +834,18 @@
++ static void dx_release (struct dx_frame *frames)
++ {
++ 	struct dx_root_info *info;
+++	int i;
+++
++ 	if (frames[0].bh == NULL)
++ 		return;
++ 
++ 	info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
++-	if (info->indirect_levels)
++-		brelse(frames[1].bh);
++-	brelse(frames[0].bh);
+++	for (i = 0; i <= info->indirect_levels; i++) {
+++		if (frames[i].bh == NULL)
+++			break;
+++		brelse(frames[i].bh);
+++		frames[i].bh = NULL;
+++	}
++ }
++ 
++ /*
++@@ -568,7 +868,7 @@
++ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
++ 				 struct dx_frame *frame,
++ 				 struct dx_frame *frames,
++-				 __u32 *start_hash)
+++				 __u32 *start_hash, struct htree_lock *lck)
++ {
++ 	struct dx_frame *p;
++ 	struct buffer_head *bh;
++@@ -583,12 +883,22 @@
++ 	 * this loop, num_frames indicates the number of interior
++ 	 * nodes need to be read.
++ 	 */
+++	ext4_htree_de_unlock(lck);
++ 	while (1) {
++-		if (++(p->at) < p->entries + dx_get_count(p->entries))
++-			break;
+++		if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
+++			/* num_frames > 0 :
+++			 *   DX block
+++			 * ext4_htree_dx_locked:
+++			 *   frame->at is a reliable pointer returned by dx_probe,
+++			 *   otherwise dx_probe already knew there was no collision */
+++			if (++(p->at) < p->entries + dx_get_count(p->entries))
+++				break;
+++		}
++ 		if (p == frames)
++ 			return 0;
++ 		num_frames++;
+++		if (num_frames == 1)
+++			ext4_htree_dx_unlock(lck);
++ 		p--;
++ 	}
++ 
++@@ -611,6 +921,13 @@
++ 	 * block so no check is necessary
++ 	 */
++ 	while (num_frames--) {
+++		if (num_frames == 0) {
+++			/* it's not always necessary; we just don't want to
+++			 * detect the hash collision again */
+++			ext4_htree_dx_need_lock(lck);
+++			ext4_htree_dx_lock(lck, p->at);
+++		}
+++
++ 		if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
++ 				      0, &err)))
++ 			return err; /* Failure */
++@@ -619,6 +936,7 @@
++ 		p->bh = bh;
++ 		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
++ 	}
+++	ext4_htree_de_lock(lck, p->at);
++ 	return 1;
++ }
++ 
++@@ -688,7 +1006,7 @@
++ {
++ 	struct dx_hash_info hinfo;
++ 	struct ext4_dir_entry_2 *de;
++-	struct dx_frame frames[2], *frame;
+++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
++ 	struct inode *dir;
++ 	ext4_lblk_t block;
++ 	int count = 0;
++@@ -711,10 +1029,10 @@
++ 	}
++ 	hinfo.hash = start_hash;
++ 	hinfo.minor_hash = 0;
++-	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
+++	/* assume it's PR locked */
+++	frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
++ 	if (!frame)
++ 		return err;
++-
++ 	/* Add '.' and '..' from the htree header */
++ 	if (!start_hash && !start_minor_hash) {
++ 		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
++@@ -741,7 +1059,7 @@
++ 		count += ret;
++ 		hashval = ~0;
++ 		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
++-					    frame, frames, &hashval);
+++					    frame, frames, &hashval, NULL);
++ 		*next_hash = hashval;
++ 		if (ret < 0) {
++ 			err = ret;
++@@ -841,9 +1159,17 @@
++ 
++ static void ext4_update_dx_flag(struct inode *inode)
++ {
+++	/* Disable it for ldiskfs, because going from a DX directory to
+++	 * a non-DX directory while it is in use will completely break
+++	 * the htree-locking.
+++	 * If we really want to support this operation in the future,
+++	 * we would need to exclusively lock the directory here, which
+++	 * would increase the complexity of the code */
+++#if 0
++ 	if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
++ 				     EXT4_FEATURE_COMPAT_DIR_INDEX))
++ 		ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+++#endif
++ }
++ 
++ /*
++@@ -916,8 +1242,9 @@
++  * to brelse() it when appropriate.
++  */
++ struct buffer_head * ext4_find_entry(struct inode *dir,
++-				      const struct qstr *d_name,
++-				      struct ext4_dir_entry_2 ** res_dir)
+++				     const struct qstr *d_name,
+++				     struct ext4_dir_entry_2 **res_dir,
+++				     struct htree_lock *lck)
++ {
++ 	struct super_block *sb;
++ 	struct buffer_head *bh_use[NAMEI_RA_SIZE];
++@@ -938,7 +1265,7 @@
++ 	if (namelen > EXT4_NAME_LEN)
++ 		return NULL;
++ 	if (is_dx(dir)) {
++-		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
+++		bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
++ 		/*
++ 		 * On success, or if the error was file not found,
++ 		 * return.  Otherwise, fall back to doing a search the
++@@ -948,6 +1275,7 @@
++ 			return bh;
++ 		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
++ 			       "falling back\n"));
+++		ext4_htree_safe_relock(lck);
++ 	}
++ 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
++ 	start = EXT4_I(dir)->i_dir_start_lookup;
++@@ -1026,13 +1354,15 @@
++ }
++ EXPORT_SYMBOL(ext4_find_entry);
++ 
++-static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
++-		       struct ext4_dir_entry_2 **res_dir, int *err)
+++static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+++				const struct qstr *d_name,
+++				struct ext4_dir_entry_2 **res_dir,
+++				struct htree_lock *lck, int *err)
++ {
++ 	struct super_block * sb;
++ 	struct dx_hash_info	hinfo;
++ 	u32 hash;
++-	struct dx_frame frames[2], *frame;
+++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
++ 	struct ext4_dir_entry_2 *de, *top;
++ 	struct buffer_head *bh;
++ 	ext4_lblk_t block;
++@@ -1043,13 +1373,16 @@
++ 	sb = dir->i_sb;
++ 	/* NFS may look up ".." - look at dx_root directory block */
++ 	if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
++-		if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
+++		if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
++ 			return NULL;
++ 	} else {
++ 		frame = frames;
++ 		frame->bh = NULL;			/* for dx_release() */
++ 		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
++ 		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
+++		/* "." and ".." are stored in root DX lock */
+++		ext4_htree_dx_need_lock(lck);
+++		ext4_htree_dx_lock(lck, NULL);
++ 	}
++ 	hash = hinfo.hash;
++ 	do {
++@@ -1078,7 +1411,7 @@
++ 		brelse(bh);
++ 		/* Check to see if we should continue to search */
++ 		retval = ext4_htree_next_block(dir, hash, frame,
++-					       frames, NULL);
+++					       frames, NULL, lck);
++ 		if (retval < 0) {
++ 			ext4_warning(sb, __func__,
++ 			     "error reading index page in directory #%lu",
++@@ -1104,7 +1437,7 @@
++ 	if (dentry->d_name.len > EXT4_NAME_LEN)
++ 		return ERR_PTR(-ENAMETOOLONG);
++ 
++-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
+++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ 	inode = NULL;
++ 	if (bh) {
++ 		__u32 ino = le32_to_cpu(de->inode);
++@@ -1173,7 +1506,7 @@
++ 	struct ext4_dir_entry_2 * de;
++ 	struct buffer_head *bh;
++ 
++-	bh = ext4_find_entry(child->d_inode, &dotdot, &de);
+++	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
++ 	inode = NULL;
++ 	if (!bh)
++ 		return ERR_PTR(-ENOENT);
++@@ -1262,8 +1595,9 @@
++  * Returns pointer to de in block into which the new entry will be inserted.
++  */
++ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
++-			struct buffer_head **bh,struct dx_frame *frame,
++-			struct dx_hash_info *hinfo, int *error)
+++			struct buffer_head **bh, struct dx_frame *frames,
+++			struct dx_frame *frame, struct dx_hash_info *hinfo,
+++			struct htree_lock *lck, int *error)
++ {
++ 	unsigned blocksize = dir->i_sb->s_blocksize;
++ 	unsigned count, continued;
++@@ -1320,7 +1654,14 @@
++ 					hash2, split, count-split));
++ 
++ 	/* Fancy dance to stay within two buffers */
++-	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
+++	if (hinfo->hash < hash2) {
+++		de2 = dx_move_dirents(data1, data2, map + split,
+++				      count - split, blocksize);
+++	} else {
+++		/* make sure we add the entry to the same block that
+++		 * we have already locked */
+++		de2 = dx_move_dirents(data1, data2, map, split, blocksize);
+++	}
++ 	de = dx_pack_dirents(data1, blocksize);
++ 	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
++ 					   blocksize);
++@@ -1329,13 +1670,21 @@
++ 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
++ 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
++ 
++-	/* Which block gets the new entry? */
++-	if (hinfo->hash >= hash2)
++-	{
++-		swap(*bh, bh2);
++-		de = de2;
+++	ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
+++			     frame->at); /* notify block is being split */
+++	if (hinfo->hash < hash2) {
+++		dx_insert_block(frame, hash2 + continued, newblock);
+++
+++	} else {
+++		/* switch block number */
+++		dx_insert_block(frame, hash2 + continued,
+++				dx_get_block(frame->at));
+++		dx_set_block(frame->at, newblock);
+++		(frame->at)++;
++ 	}
++-	dx_insert_block(frame, hash2 + continued, newblock);
+++	ext4_htree_spin_unlock(lck);
+++	ext4_htree_dx_unlock(lck);
+++
++ 	err = ext4_handle_dirty_metadata(handle, dir, bh2);
++ 	if (err)
++ 		goto journal_error;
++@@ -1447,7 +1796,7 @@
++ 	if (!IS_NOCMTIME(dir))
++ 		dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
++ 	ext4_update_dx_flag(dir);
++-	dir->i_version++;
+++	inode_inc_iversion(dir);
++ 	ext4_mark_inode_dirty(handle, dir);
++ 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
++ 	err = ext4_handle_dirty_metadata(handle, dir, bh);
++@@ -1467,7 +1816,7 @@
++ 	const char	*name = dentry->d_name.name;
++ 	int		namelen = dentry->d_name.len;
++ 	struct buffer_head *bh2;
++-	struct dx_frame	frames[2], *frame;
+++	struct dx_frame	frames[EXT4_HTREE_LEVEL], *frame;
++ 	struct dx_entry *entries;
++ 	struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
++ 	char		*data1, *top;
++@@ -1545,7 +1894,7 @@
++ 	frame->at = entries;
++ 	frame->bh = bh;
++ 	bh = bh2;
++-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+++	de = do_split(handle,dir, &bh, frames, frame, &hinfo, NULL, &retval);
++ 	dx_release (frames);
++ 	if (!(de))
++ 		return retval;
++@@ -1644,7 +1993,7 @@
++  * the entry, as someone else might have used it while you slept.
++  */
++ int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++-		   struct inode *inode)
+++		   struct inode *inode, struct htree_lock *lck)
++ {
++ 	struct inode *dir = dentry->d_parent->d_inode;
++ 	struct buffer_head *bh;
++@@ -1663,9 +2012,10 @@
++ 		if (dentry->d_name.len == 2 &&
++ 		    memcmp(dentry->d_name.name, "..", 2) == 0)
++ 			return ext4_update_dotdot(handle, dentry, inode);
++-		retval = ext4_dx_add_entry(handle, dentry, inode);
+++		retval = ext4_dx_add_entry(handle, dentry, inode, lck);
++ 		if (!retval || (retval != ERR_BAD_DX_DIR))
++ 			return retval;
+++		ext4_htree_safe_relock(lck);
++ 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
++ 		dx_fallback++;
++ 		ext4_mark_inode_dirty(handle, dir);
++@@ -1704,18 +2054,21 @@
++  * Returns 0 for success, or a negative error value
++  */
++ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
++-			     struct inode *inode)
+++			     struct inode *inode, struct htree_lock *lck)
++ {
++-	struct dx_frame frames[2], *frame;
+++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
++ 	struct dx_entry *entries, *at;
++ 	struct dx_hash_info hinfo;
++ 	struct buffer_head *bh;
++ 	struct inode *dir = dentry->d_parent->d_inode;
++ 	struct super_block *sb = dir->i_sb;
++ 	struct ext4_dir_entry_2 *de;
+++	int restart;
++ 	int err;
++ 
++-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
+++again:
+++	restart = 0;
+++	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
++ 	if (!frame)
++ 		return err;
++ 	entries = frame->entries;
++@@ -1729,29 +2082,59 @@
++ 	if (err)
++ 		goto journal_error;
++ 
+++	BUFFER_TRACE(bh, "get_write_access");
+++	err = ext4_journal_get_write_access(handle, bh);
+++	if (err)
+++		goto journal_error;
+++
++ 	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
++ 	if (err != -ENOSPC)
++ 		goto cleanup;
++ 
+++	err = 0;
++ 	/* Block full, should compress but for now just split */
++ 	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
++ 		       dx_get_count(entries), dx_get_limit(entries)));
++ 	/* Need to split index? */
++ 	if (dx_get_count(entries) == dx_get_limit(entries)) {
++ 		ext4_lblk_t newblock;
++-		unsigned icount = dx_get_count(entries);
++-		int levels = frame - frames;
+++		int levels = frame - frames + 1;
+++		unsigned icount;
+++		int add_level = 1;
++ 		struct dx_entry *entries2;
++ 		struct dx_node *node2;
++ 		struct buffer_head *bh2;
++ 
++-		if (levels && (dx_get_count(frames->entries) ==
++-			       dx_get_limit(frames->entries))) {
++-			ext4_warning(sb, __func__,
++-				     "Directory index full!");
+++		if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
+++			ext4_htree_safe_relock(lck);
+++			restart = 1;
+++			goto cleanup;
+++		}
+++		while (frame > frames) {
+++			if (dx_get_count((frame - 1)->entries) <
+++			    dx_get_limit((frame - 1)->entries)) {
+++				add_level = 0;
+++				break;
+++			}
+++			frame--; /* split higher index block */
+++			at = frame->at;
+++			entries = frame->entries;
+++			restart = 1;
+++		}
+++
+++		if (add_level && levels == ext4_dir_htree_level(sb)) {
+++			ext4_warning(sb, __func__, "Directory (ino: %lu) index full, "
+++					 "reached max htree level: %d",
+++					 dir->i_ino, levels);
+++			if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
+++				ext4_warning(sb, __func__, "Large directory feature is "
+++						 "not enabled on this "
+++						 "filesystem");
+++			}
++ 			err = -ENOSPC;
++ 			goto cleanup;
++ 		}
+++		icount = dx_get_count(entries);
++ 		bh2 = ext4_append (handle, dir, &newblock, &err);
++ 		if (!(bh2))
++ 			goto cleanup;
++@@ -1764,7 +2147,7 @@
++ 		err = ext4_journal_get_write_access(handle, frame->bh);
++ 		if (err)
++ 			goto journal_error;
++-		if (levels) {
+++		if (!add_level) {
++ 			unsigned icount1 = icount/2, icount2 = icount - icount1;
++ 			unsigned hash2 = dx_get_hash(entries + icount1);
++ 			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
++@@ -1772,7 +2155,7 @@
++ 
++ 			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
++ 			err = ext4_journal_get_write_access(handle,
++-							     frames[0].bh);
+++							    (frame - 1)->bh);
++ 			if (err)
++ 				goto journal_error;
++ 
++@@ -1788,18 +2171,24 @@
++ 				frame->entries = entries = entries2;
++ 				swap(frame->bh, bh2);
++ 			}
++-			dx_insert_block(frames + 0, hash2, newblock);
++-			dxtrace(dx_show_index("node", frames[1].entries));
+++			dx_insert_block((frame - 1), hash2, newblock);
+++			dxtrace(dx_show_index("node", frame->entries));
++ 			dxtrace(dx_show_index("node",
++ 			       ((struct dx_node *) bh2->b_data)->entries));
++ 			err = ext4_handle_dirty_metadata(handle, inode, bh2);
++ 			if (err)
++ 				goto journal_error;
++ 			brelse (bh2);
+++			ext4_handle_dirty_metadata(handle, inode,
+++						   (frame - 1)->bh);
+++			if (restart) {
+++				ext4_handle_dirty_metadata(handle, inode,
+++							   frame->bh);
+++				goto cleanup;
+++			}
++ 		} else {
++ 			struct dx_root_info * info;
++-			dxtrace(printk(KERN_DEBUG
++-				       "Creating second level index...\n"));
+++
++ 			memcpy((char *) entries2, (char *) entries,
++ 			       icount * sizeof(struct dx_entry));
++ 			dx_set_limit(entries2, dx_node_limit(dir));
++@@ -1809,32 +2198,60 @@
++ 			dx_set_block(entries + 0, newblock);
++ 			info = dx_get_dx_info((struct ext4_dir_entry_2*)
++ 					frames[0].bh->b_data);
++-			info->indirect_levels = 1;
+++			info->indirect_levels += 1;
+++			dxtrace(printk(KERN_DEBUG
+++				       "Creating %d level index...\n",
+++				       info->indirect_levels));
+++			ext4_handle_dirty_metadata(handle, inode, frame->bh);
+++			ext4_handle_dirty_metadata(handle, inode, bh2);
+++			brelse(bh2);
+++			restart = 1;
+++			goto cleanup;
+++		}
+++	} else if (!ext4_htree_dx_locked(lck)) {
+++		struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
++ 
++-			/* Add new access path frame */
++-			frame = frames + 1;
++-			frame->at = at = at - entries + entries2;
++-			frame->entries = entries = entries2;
++-			frame->bh = bh2;
++-			err = ext4_journal_get_write_access(handle,
++-							     frame->bh);
++-			if (err)
++-				goto journal_error;
+++		/* not well protected, require DX lock */
+++		ext4_htree_dx_need_lock(lck);
+++		at = frame > frames ? (frame - 1)->at : NULL;
+++
+++		/* NB: no risk of deadlock because it's just a try.
+++		 *
+++		 * NB: we check ld_count twice, the first time before
+++		 * taking the DX lock, the second time while holding it.
+++		 *
+++		 * NB: we never free directory blocks so far, which means
+++		 * the value returned by dx_get_count() should equal
+++		 * ld->ld_count if nobody split any DE-block under @at,
+++		 * and ld->ld_at still points to a valid dx_entry. */
+++		if ((ld->ld_count != dx_get_count(entries)) ||
+++		    !ext4_htree_dx_lock_try(lck, at) ||
+++		    (ld->ld_count != dx_get_count(entries))) {
+++			restart = 1;
+++			goto cleanup;
++ 		}
++-		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+++		/* OK, I've got DX lock and nothing changed */
+++		frame->at = ld->ld_at;
++ 	}
++-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
+++	de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
++ 	if (!de)
++ 		goto cleanup;
+++
++ 	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
++ 	goto cleanup;
++ 
++ journal_error:
++ 	ext4_std_error(dir->i_sb, err);
++ cleanup:
+++	ext4_htree_dx_unlock(lck);
+++	ext4_htree_de_unlock(lck);
++ 	if (bh)
++ 		brelse(bh);
++ 	dx_release(frames);
+++	/* @restart being true means the htree path has changed; we need
+++	 * to repeat dx_probe() to find a valid htree path */
+++	if (restart && err == 0)
+++		goto again;
++ 	return err;
++ }
++ 
++@@ -1869,7 +2286,7 @@
++ 					blocksize);
++ 			else
++ 				de->inode = 0;
++-			dir->i_version++;
+++			inode_inc_iversion(dir);
++ 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
++ 			ext4_handle_dirty_metadata(handle, dir, bh);
++ 			return 0;
++@@ -1912,7 +2329,7 @@
++ static int ext4_add_nondir(handle_t *handle,
++ 		struct dentry *dentry, struct inode *inode)
++ {
++-	int err = ext4_add_entry(handle, dentry, inode);
+++	int err = ext4_add_entry(handle, dentry, inode, NULL);
++ 	if (!err) {
++ 		ext4_mark_inode_dirty(handle, inode);
++ 		d_instantiate(dentry, inode);
++@@ -2142,7 +2559,7 @@
++ 		goto out_stop;
++ 	}
++ 
++-	err = ext4_add_entry(handle, dentry, inode);
+++	err = ext4_add_entry(handle, dentry, inode, NULL);
++ 	if (err) {
++ 		clear_nlink(inode);
++ 		unlock_new_inode(inode);
++@@ -2411,7 +2828,7 @@
++ 		return PTR_ERR(handle);
++ 
++ 	retval = -ENOENT;
++-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
+++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ 	if (!bh)
++ 		goto end_rmdir;
++ 
++@@ -2473,7 +2890,7 @@
++ 		ext4_handle_sync(handle);
++ 
++ 	retval = -ENOENT;
++-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
+++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
++ 	if (!bh)
++ 		goto end_unlink;
++ 
++@@ -2597,7 +3014,7 @@
++ 	ext4_inc_count(handle, inode);
++ 	atomic_inc(&inode->i_count);
++ 
++-	err = ext4_add_entry(handle, dentry, inode);
+++	err = ext4_add_entry(handle, dentry, inode, NULL);
++ 	if (!err) {
++ 		ext4_mark_inode_dirty(handle, inode);
++ 		d_instantiate(dentry, inode);
++@@ -2642,7 +3059,7 @@
++ 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
++ 		ext4_handle_sync(handle);
++ 
++-	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
+++	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
++ 	/*
++ 	 *  Check for inode number is _not_ due to possible IO errors.
++ 	 *  We might rmdir the source, keep it as pwd of some process
++@@ -2655,7 +3072,7 @@
++ 		goto end_rename;
++ 
++ 	new_inode = new_dentry->d_inode;
++-	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
+++	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, NULL);
++ 	if (new_bh) {
++ 		if (!new_inode) {
++ 			brelse(new_bh);
++@@ -2681,7 +3098,7 @@
++ 			goto end_rename;
++ 	}
++ 	if (!new_bh) {
++-		retval = ext4_add_entry(handle, new_dentry, old_inode);
+++		retval = ext4_add_entry(handle, new_dentry, old_inode, NULL);
++ 		if (retval)
++ 			goto end_rename;
++ 	} else {
++@@ -2723,7 +3140,8 @@
++ 		struct buffer_head *old_bh2;
++ 		struct ext4_dir_entry_2 *old_de2;
++ 
++-		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
+++		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
+++					  &old_de2, NULL);
++ 		if (old_bh2) {
++ 			retval = ext4_delete_entry(handle, old_dir,
++ 						   old_de2, old_bh2);
++Index: linux-source-2.6.32/fs/ext4/inode.c
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:10:23.333666208 +0200
+++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:11:51.985662967 +0200
++@@ -4923,7 +4923,7 @@
++ 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
++ 		ei->i_file_acl |=
++ 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
++-	inode->i_size = ext4_isize(raw_inode);
+++	inode->i_size = ext4_isize(sb, raw_inode);
++ 	ei->i_disksize = inode->i_size;
++ #ifdef CONFIG_QUOTA
++ 	ei->i_reserved_quota = 0;
++Index: linux-source-2.6.32/fs/ext4/Makefile
++===================================================================
++--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:10:45.425668386 +0200
+++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:11:51.985662967 +0200
++@@ -7,7 +7,7 @@
++ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
++ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
++ 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
++-		mmp.o dynlocks.o
+++		htree_lock.o mmp.o dynlocks.o
++ 
++ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
++ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
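The hunks above thread a struct htree_lock pointer through every directory
lookup and insert path. As a reading aid — a hedged sketch, not part of the
patchset — a caller that does not participate in parallel directory operations
simply passes NULL, which preserves the pre-patch, i_mutex-serialized
behaviour, exactly as the converted in-tree call sites do:

/* Hedged sketch (not part of the patchset): a serial caller passes a
 * NULL htree_lock, matching the converted call sites above. */
static int demo_lookup_then_add(handle_t *handle, struct inode *dir,
				struct dentry *dentry, struct inode *inode)
{
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;

	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (bh) {
		brelse(bh);		/* name already present */
		return -EEXIST;
	}
	return ext4_add_entry(handle, dentry, inode, NULL);
}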
diff --git a/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch
new file mode 100644
index 0000000..79a0e7f
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/export-ext4-2.6.32-vanilla.patch
@@ -0,0 +1,81 @@
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:14.205662995 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:19.413668742 +0200
+@@ -292,6 +292,8 @@
+ 	jbd2_journal_abort_handle(handle);
+ }
+ 
++EXPORT_SYMBOL(ext4_journal_abort_handle);
++
+ /* Deal with the reporting of failure conditions on a filesystem such as
+  * inconsistencies detected or read IO failures.
+  *
+@@ -3001,6 +3003,8 @@
+ 	return ret;
+ }
+ 
++EXPORT_SYMBOL(ext4_force_commit);
++
+ /*
+  * Setup any per-fs journal parameters now.  We'll do this both on
+  * initial mount, once the journal has been initialised but before we've
+@@ -4056,6 +4060,12 @@
+ 			unsigned long *blocks, int *created, int create);
+ EXPORT_SYMBOL(ext4_map_inode_page);
+ 
++EXPORT_SYMBOL(ext4_xattr_get);
++EXPORT_SYMBOL(ext4_xattr_set_handle);
++EXPORT_SYMBOL(ext4_bread);
++EXPORT_SYMBOL(ext4_journal_start_sb);
++EXPORT_SYMBOL(__ext4_journal_stop);
++
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Fourth Extended Filesystem");
+ MODULE_LICENSE("GPL");
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:08:38.193663076 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:19.417666703 +0200
+@@ -1498,6 +1498,8 @@
+ 				       struct buffer_head *bh,
+ 				       ext4_group_t group,
+ 				       struct ext4_group_desc *desc);
++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
++						  ext4_group_t block_group);
+ extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+ 
+ /* mballoc.c */
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:08:38.081662455 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:19.417666703 +0200
+@@ -98,7 +98,7 @@
+  *
+  * Return buffer_head of bitmap on success or NULL.
+  */
+-static struct buffer_head *
++struct buffer_head *
+ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ {
+ 	struct ext4_group_desc *desc;
+@@ -161,6 +161,7 @@
+ 	}
+ 	return bh;
+ }
++EXPORT_SYMBOL(ext4_read_inode_bitmap);
+ 
+ /*
+  * NOTE! When we get the inode, we're the only people
+Index: linux-source-2.6.32/fs/ext4/balloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/balloc.c	2012-06-28 12:08:37.973664862 +0200
++++ linux-source-2.6.32/fs/ext4/balloc.c	2012-06-28 12:09:19.417666703 +0200
+@@ -232,6 +232,7 @@
+ 		*bh = sbi->s_group_desc[group_desc];
+ 	return desc;
+ }
++EXPORT_SYMBOL(ext4_get_group_desc);
+ 
+ static int ext4_valid_block_bitmap(struct super_block *sb,
+ 					struct ext4_group_desc *desc,
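The exports above exist so that the ldiskfs/Lustre modules can reach into
ext4 internals. A minimal, hypothetical out-of-tree fragment using two of the
newly exported symbols might look like the following (a sketch only; it
assumes the fs/ext4 private headers are on the include path):

/* Hypothetical module fragment (sketch): uses symbols exported above. */
#include <linux/module.h>
#include <linux/fs.h>
#include "ext4.h"

static int demo_free_inodes_in_group0(struct super_block *sb)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;

	desc = ext4_get_group_desc(sb, 0, NULL);	/* exported above */
	if (desc == NULL)
		return -EIO;

	bh = ext4_read_inode_bitmap(sb, 0);		/* exported above */
	if (bh == NULL)
		return -EIO;
	brelse(bh);

	return ext4_free_inodes_count(sb, desc);
}

MODULE_LICENSE("GPL");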
diff --git a/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch
new file mode 100644
index 0000000..9f72327
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-alloc-policy-2.6.32-vanilla.patch
@@ -0,0 +1,87 @@
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:10:06.381677398 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:30.005662279 +0200
+@@ -1086,6 +1086,36 @@
+ 	return ERR_PTR(err);
+ }
+ 
++unsigned long ext4_find_reverse(struct super_block *sb)
++{
++	struct ext4_group_desc *desc;
++	struct buffer_head *bitmap_bh = NULL;
++	int group;
++	unsigned long ino, offset;
++
++	for (offset = (EXT4_INODES_PER_GROUP(sb) >> 1); offset >= 0;
++	     offset >>= 1) {
++		for (group = EXT4_SB(sb)->s_groups_count - 1; group >= 0;
++		     --group) {
++			desc = ext4_get_group_desc(sb, group, NULL);
++			if (ext4_free_inodes_count(sb, desc) == 0)
++				continue;
++
++			bitmap_bh = ext4_read_inode_bitmap(sb, group);
++			if (!bitmap_bh)
++				continue;
++
++			ino = ext4_find_next_zero_bit((unsigned long *)
++					bitmap_bh->b_data,
++					EXT4_INODES_PER_GROUP(sb), offset);
++			if (ino < EXT4_INODES_PER_GROUP(sb))
++				return (group * EXT4_INODES_PER_GROUP(sb) +
++				       ino + 1);
++		}
++	}
++	return 0;
++}
++
+ /* Verify that we are loading a valid orphan from disk */
+ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ {
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:09.617666134 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:30.005662279 +0200
+@@ -155,6 +155,12 @@
+ 	u32		ldp_magic;
+ };
+ 
++/* Only use the least 3 bits of ldp_flags for goal policy */
++typedef enum {
++	DP_GOAL_POLICY       = 0,
++	DP_LASTGROUP_REVERSE = 1,
++} dp_policy_t;
++
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
+ static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
+ static inline unsigned dx_get_hash(struct dx_entry *entry);
+@@ -1802,8 +1808,14 @@
+ 	if (dentry->d_fsdata != NULL) {
+ 		struct lvfs_dentry_params *param = dentry->d_fsdata;
+ 
+-		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC)
+-			inum = param->ldp_inum;
++		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC) {
++			if ((dp_policy_t)(param->ldp_flags & 0x7) ==
++			    DP_LASTGROUP_REVERSE)
++				inum = ext4_find_reverse(sb);
++			else /* DP_GOAL_POLICY */
++				inum = param->ldp_inum;
++		}
++
+ 	}
+ 	return inum;
+ }
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:23.325664479 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:30.009672059 +0200
+@@ -1581,6 +1581,7 @@
+ extern struct inode *ext4_new_inode(handle_t *, struct inode *, int,
+ 				    const struct qstr *qstr, __u32 goal);
+ extern void ext4_free_inode(handle_t *, struct inode *);
++extern unsigned long ext4_find_reverse(struct super_block *);
+ extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
+ extern unsigned long ext4_count_free_inodes(struct super_block *);
+ extern unsigned long ext4_count_dirs(struct super_block *);
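The allocation policy is carried in the low 3 bits of ldp_flags inside the
lvfs_dentry_params block hung off dentry->d_fsdata. A hedged sketch of how a
caller (for example the Lustre OSD) could request reverse last-group
allocation — the field names are those used in the hunks above:

/* Hedged sketch of a hypothetical caller requesting reverse allocation. */
static void demo_request_reverse_alloc(struct dentry *dentry,
				       struct lvfs_dentry_params *param)
{
	param->ldp_magic = LVFS_DENTRY_PARAM_MAGIC;
	param->ldp_flags = DP_LASTGROUP_REVERSE;	/* low 3 bits = policy */
	dentry->d_fsdata = param;
	/* ext4_new_inode() will then derive the goal inode from
	 * ext4_find_reverse(), which scans groups from the last one back. */
}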
diff --git a/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch
new file mode 100644
index 0000000..0025564
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-back-dquot-to-2.6.32-vanilla.patch
@@ -0,0 +1,54 @@
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:24.113671273 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:27.705663349 +0200
+@@ -1017,9 +1017,47 @@
+ static ssize_t ext4_quota_write(struct super_block *sb, int type,
+ 				const char *data, size_t len, loff_t off);
+ 
++static int ext4_dquot_initialize(struct inode *inode, int type)
++{
++	handle_t *handle;
++	int ret, err;
++
++	/* We may create quota structure so we need to reserve enough blocks */
++	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++	ret = dquot_initialize(inode, type);
++	err = ext4_journal_stop(handle);
++	if (!ret)
++		ret = err;
++	return ret;
++}
++
++static int ext4_dquot_drop(struct inode *inode)
++{
++	handle_t *handle;
++	int ret, err;
++
++	/* We may delete quota structure so we need to reserve enough blocks */
++	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
++	if (IS_ERR(handle)) {
++		/*
++		 * We call dquot_drop() anyway to at least release references
++		 * to quota structures so that umount does not hang.
++		 */
++		dquot_drop(inode);
++		return PTR_ERR(handle);
++	}
++	ret = dquot_drop(inode);
++	err = ext4_journal_stop(handle);
++	if (!ret)
++		ret = err;
++	return ret;
++}
++
+ static const struct dquot_operations ext4_quota_operations = {
+-	.initialize	= dquot_initialize,
+-	.drop		= dquot_drop,
++	.initialize	= ext4_dquot_initialize,
++	.drop		= ext4_dquot_drop,
+ 	.alloc_space	= dquot_alloc_space,
+ 	.reserve_space	= dquot_reserve_space,
+ 	.claim_space	= dquot_claim_space,
diff --git a/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch
new file mode 100644
index 0000000..2ca65a5
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-big-endian-check-2.6.32-vanilla.patch
@@ -0,0 +1,57 @@
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:23.337667624 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:26.929665337 +0200
+@@ -72,6 +72,8 @@
+ static int ext4_freeze(struct super_block *sb);
+ 
+ 
++static int bigendian_extents;
++
+ ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
+ 			       struct ext4_group_desc *bg)
+ {
+@@ -1107,7 +1109,7 @@
+ 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
+ 	Opt_block_validity, Opt_noblock_validity,
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+-	Opt_mballoc,
++	Opt_mballoc, Opt_bigendian_extents,
+ 	Opt_discard, Opt_nodiscard,
+ };
+ 
+@@ -1177,6 +1179,7 @@
+ 	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
+ 	{Opt_auto_da_alloc, "auto_da_alloc"},
+ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
++	{Opt_bigendian_extents, "bigendian_extents"},
+ 	{Opt_mballoc, "mballoc"},
+ 	{Opt_discard, "discard"},
+ 	{Opt_nodiscard, "nodiscard"},
+@@ -1612,6 +1615,9 @@
+ 			else
+ 				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+ 			break;
++		case Opt_bigendian_extents:
++			bigendian_extents = 1;
++			break;
+ 		case Opt_discard:
+ 			set_opt(sbi->s_mount_opt, DISCARD);
+ 			break;
+@@ -2693,6 +2699,16 @@
+ 		goto failed_mount;
+ 	}
+ 
++#ifdef __BIG_ENDIAN
++	if (bigendian_extents == 0) {
++		printk(KERN_ERR "EXT4-fs: extents feature is not guaranteed to "
++		       "work on big-endian systems. Use \"bigendian_extents\" "
++		       "mount option to override.\n");
++		goto failed_mount;
++	}
++#endif
++
++
+ #ifdef CONFIG_PROC_FS
+ 	if (ext4_proc_root)
+ 		sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
diff --git a/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch
new file mode 100644
index 0000000..77fb42d
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-disable-mb-cache-2.6.32-vanilla.patch
@@ -0,0 +1,154 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:20.317665268 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:24.109662558 +0200
+@@ -827,7 +827,8 @@
+ /*
+  * Mount flags
+  */
+-#define EXT4_MOUNT_OLDALLOC		0x00002  /* Don't use the new Orlov allocator */
++#define EXT4_MOUNT_NO_MBCACHE		0x00001 /* Disable mbcache */
++#define EXT4_MOUNT_OLDALLOC		0x00002 /* Don't use the new Orlov allocator */
+ #define EXT4_MOUNT_GRPID		0x00004	/* Create files with directory's group */
+ #define EXT4_MOUNT_DEBUG		0x00008	/* Some debugging messages */
+ #define EXT4_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:54.885674648 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:24.113671273 +0200
+@@ -1118,6 +1118,7 @@
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+ 	Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
+ 	Opt_extents, Opt_noextents,
++	Opt_no_mbcache,
+ 	Opt_discard, Opt_nodiscard,
+ };
+ 
+@@ -1190,6 +1191,7 @@
+ 	{Opt_bigendian_extents, "bigendian_extents"},
+ 	{Opt_force_over_128tb, "force_over_128tb"},
+ 	{Opt_mballoc, "mballoc"},
++	{Opt_no_mbcache, "no_mbcache"},
+ 	{Opt_extents, "extents"},
+ 	{Opt_noextents, "noextents"},
+ 	{Opt_discard, "discard"},
+@@ -1667,6 +1669,9 @@
+ 			}
+ 			clear_opt(sbi->s_mount_opt, EXTENTS);
+ 			break;
++		case Opt_no_mbcache:
++			set_opt(sbi->s_mount_opt, NO_MBCACHE);
++			break;
+ 		default:
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "Unrecognized mount option \"%s\" "
+Index: linux-source-2.6.32/fs/ext4/xattr.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:11:20.321664768 +0200
++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:24.117664822 +0200
+@@ -86,7 +86,8 @@
+ # define ea_bdebug(f...)
+ #endif
+ 
+-static void ext4_xattr_cache_insert(struct buffer_head *);
++static void ext4_xattr_cache_insert(struct super_block *,
++				    struct buffer_head *);
+ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
+ 						 struct ext4_xattr_header *,
+ 						 struct mb_cache_entry **);
+@@ -332,7 +333,7 @@
+ 		error = -EIO;
+ 		goto cleanup;
+ 	}
+-	ext4_xattr_cache_insert(bh);
++	ext4_xattr_cache_insert(inode->i_sb, bh);
+ 	entry = BFIRST(bh);
+ 	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
+ 				      inode);
+@@ -491,7 +492,7 @@
+ 		error = -EIO;
+ 		goto cleanup;
+ 	}
+-	ext4_xattr_cache_insert(bh);
++	ext4_xattr_cache_insert(inode->i_sb, bh);
+ 	error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
+ 
+ cleanup:
+@@ -588,7 +589,9 @@
+ 	struct mb_cache_entry *ce = NULL;
+ 	int error = 0;
+ 
+-	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
++	if (!test_opt(inode->i_sb, NO_MBCACHE))
++		ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev,
++					bh->b_blocknr);
+ 	error = ext4_journal_get_write_access(handle, bh);
+ 	if (error)
+ 		goto out;
+@@ -987,8 +990,10 @@
+ #define header(x) ((struct ext4_xattr_header *)(x))
+ 
+ 	if (s->base) {
+-		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+-					bs->bh->b_blocknr);
++		if (!test_opt(inode->i_sb, NO_MBCACHE))
++			ce = mb_cache_entry_get(ext4_xattr_cache,
++						bs->bh->b_bdev,
++						bs->bh->b_blocknr);
+ 		error = ext4_journal_get_write_access(handle, bs->bh);
+ 		if (error)
+ 			goto cleanup;
+@@ -1005,7 +1010,7 @@
+ 				if (!IS_LAST_ENTRY(s->first))
+ 					ext4_xattr_rehash(header(s->base),
+ 							  s->here);
+-				ext4_xattr_cache_insert(bs->bh);
++				ext4_xattr_cache_insert(sb, bs->bh);
+ 			}
+ 			unlock_buffer(bs->bh);
+ 			if (error == -EIO)
+@@ -1088,7 +1093,8 @@
+ 				if (error)
+ 					goto cleanup_dquot;
+ 			}
+-			mb_cache_entry_release(ce);
++			if (ce)
++				mb_cache_entry_release(ce);
+ 			ce = NULL;
+ 		} else if (bs->bh && s->base == bs->bh->b_data) {
+ 			/* We were modifying this block in-place. */
+@@ -1132,7 +1138,7 @@
+ 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+ 			set_buffer_uptodate(new_bh);
+ 			unlock_buffer(new_bh);
+-			ext4_xattr_cache_insert(new_bh);
++			ext4_xattr_cache_insert(sb, new_bh);
+ 			error = ext4_handle_dirty_metadata(handle,
+ 							   inode, new_bh);
+ 			if (error)
+@@ -1754,12 +1760,15 @@
+  * Returns 0, or a negative error number on failure.
+  */
+ static void
+-ext4_xattr_cache_insert(struct buffer_head *bh)
++ext4_xattr_cache_insert(struct super_block *sb, struct buffer_head *bh)
+ {
+ 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+ 	struct mb_cache_entry *ce;
+ 	int error;
+ 
++	if (test_opt(sb, NO_MBCACHE))
++		return;
++
+ 	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
+ 	if (!ce) {
+ 		ea_bdebug(bh, "out of memory");
+@@ -1832,6 +1841,8 @@
+ 	__u32 hash = le32_to_cpu(header->h_hash);
+ 	struct mb_cache_entry *ce;
+ 
++	if (test_opt(inode->i_sb, NO_MBCACHE))
++		return NULL;
+ 	if (!header->h_hash)
+ 		return NULL;  /* never share */
+ 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
diff --git a/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch
new file mode 100644
index 0000000..07fad0b
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-dynlocks-common-2.6.32-vanilla.patch
@@ -0,0 +1,350 @@
+Index: linux-source-2.6.32/fs/ext4/dynlocks.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/fs/ext4/dynlocks.c	2012-06-28 12:10:45.425668386 +0200
+@@ -0,0 +1,236 @@
++/*
++ * Dynamic Locks
++ *
++ * struct dynlock is a lockspace;
++ * one may request a lock (exclusive or shared) for some value
++ * in that lockspace
++ *
++ */
++
++#include <linux/dynlocks.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++
++#define DYNLOCK_HANDLE_MAGIC	0xd19a10c
++#define DYNLOCK_HANDLE_DEAD	0xd1956ee
++#define DYNLOCK_LIST_MAGIC	0x11ee91e6
++
++static struct kmem_cache * dynlock_cachep = NULL;
++
++struct dynlock_handle {
++	unsigned 		dh_magic;
++	struct list_head	dh_list;
++	unsigned long		dh_value;	/* lock value */
++	int			dh_refcount;	/* number of users */
++	int			dh_readers;
++	int			dh_writers;
++	int			dh_pid;		/* holder of the lock */
++	wait_queue_head_t	dh_wait;
++};
++
++int __init dynlock_cache_init(void)
++{
++	int rc = 0;
++
++	/* printk(KERN_INFO "init dynlocks cache\n"); */
++	dynlock_cachep = kmem_cache_create("dynlock_cache",
++					 sizeof(struct dynlock_handle),
++					 0,
++					 SLAB_HWCACHE_ALIGN,
++					 NULL);
++	if (dynlock_cachep == NULL) {
++		printk(KERN_ERR "Not able to create dynlock cache\n");
++		rc = -ENOMEM;
++	}
++	return rc;
++}
++
++void dynlock_cache_exit(void)
++{
++	/* printk(KERN_INFO "exit dynlocks cache\n"); */
++	kmem_cache_destroy(dynlock_cachep);
++}
++
++/*
++ * dynlock_init
++ *
++ * initialize lockspace
++ *
++ */
++void dynlock_init(struct dynlock *dl)
++{
++	spin_lock_init(&dl->dl_list_lock);
++	INIT_LIST_HEAD(&dl->dl_list);
++	dl->dl_magic = DYNLOCK_LIST_MAGIC;
++}
++EXPORT_SYMBOL(dynlock_init);
++
++/*
++ * dynlock_lock
++ *
++ * acquires lock (exclusive or shared) in specified lockspace
++ * each lock in lockspace is allocated separately, so user have
++ * to specify GFP flags.
++ * routine returns pointer to lock. this pointer is intended to
++ * be passed to dynlock_unlock
++ *
++ */
++struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
++				    enum dynlock_type lt, gfp_t gfp)
++{
++	struct dynlock_handle *nhl = NULL;
++	struct dynlock_handle *hl;
++
++	BUG_ON(dl == NULL);
++	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
++
++repeat:
++	/* find requested lock in lockspace */
++	spin_lock(&dl->dl_list_lock);
++	BUG_ON(dl->dl_list.next == NULL);
++	BUG_ON(dl->dl_list.prev == NULL);
++	list_for_each_entry(hl, &dl->dl_list, dh_list) {
++		BUG_ON(hl->dh_list.next == NULL);
++		BUG_ON(hl->dh_list.prev == NULL);
++		BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
++		if (hl->dh_value == value) {
++			/* lock is found */
++			if (nhl) {
++				/* someone else already allocated the
++				 * lock we were about to add, so drop
++				 * our preallocated copy
++				 */
++				kmem_cache_free(dynlock_cachep, nhl);
++				nhl = NULL;
++			}
++			hl->dh_refcount++;
++			goto found;
++		}
++	}
++	/* lock not found */
++	if (nhl) {
++		/* we already have allocated lock. use it */
++		hl = nhl;
++		nhl = NULL;
++		list_add(&hl->dh_list, &dl->dl_list);
++		goto found;
++	}
++	spin_unlock(&dl->dl_list_lock);
++
++	/* lock not found and we haven't allocated lock yet. allocate it */
++	nhl = kmem_cache_alloc(dynlock_cachep, gfp);
++	if (nhl == NULL)
++		return NULL;
++	nhl->dh_refcount = 1;
++	nhl->dh_value = value;
++	nhl->dh_readers = 0;
++	nhl->dh_writers = 0;
++	nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
++	init_waitqueue_head(&nhl->dh_wait);
++
++	/* while the lock was being allocated, someone else may have
++	 * allocated it and put it onto the list; check for this
++	 */
++	goto repeat;
++
++found:
++	if (lt == DLT_WRITE) {
++		/* exclusive lock: the user doesn't want to share the lock at all.
++		 * NOTE: one process may take the same lock several times;
++		 * this functionality is useful for rename operations */
++		while ((hl->dh_writers && hl->dh_pid != current->pid) ||
++				hl->dh_readers) {
++			spin_unlock(&dl->dl_list_lock);
++			wait_event(hl->dh_wait,
++				hl->dh_writers == 0 && hl->dh_readers == 0);
++			spin_lock(&dl->dl_list_lock);
++		}
++		hl->dh_writers++;
++	} else {
++		/* shared lock: the user does not want to share the lock with a writer */
++		while (hl->dh_writers) {
++			spin_unlock(&dl->dl_list_lock);
++			wait_event(hl->dh_wait, hl->dh_writers == 0);
++			spin_lock(&dl->dl_list_lock);
++		}
++		hl->dh_readers++;
++	}
++	hl->dh_pid = current->pid;
++	spin_unlock(&dl->dl_list_lock);
++
++	return hl;
++}
++EXPORT_SYMBOL(dynlock_lock);
++
++
++/*
++ * dynlock_unlock
++ *
++ * the user has to specify the lockspace (dl) and the pointer to the lock
++ * structure returned by dynlock_lock()
++ *
++ */
++void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
++{
++	int wakeup = 0;
++
++	BUG_ON(dl == NULL);
++	BUG_ON(hl == NULL);
++	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
++
++	if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
++		printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
++
++	BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
++	BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
++
++	spin_lock(&dl->dl_list_lock);
++	if (hl->dh_writers) {
++		BUG_ON(hl->dh_readers != 0);
++		hl->dh_writers--;
++		if (hl->dh_writers == 0)
++			wakeup = 1;
++	} else if (hl->dh_readers) {
++		hl->dh_readers--;
++		if (hl->dh_readers == 0)
++			wakeup = 1;
++	} else {
++		BUG();
++	}
++	if (wakeup) {
++		hl->dh_pid = 0;
++		wake_up(&hl->dh_wait);
++	}
++	if (--(hl->dh_refcount) == 0) {
++		hl->dh_magic = DYNLOCK_HANDLE_DEAD;
++		list_del(&hl->dh_list);
++		kmem_cache_free(dynlock_cachep, hl);
++	}
++	spin_unlock(&dl->dl_list_lock);
++}
++EXPORT_SYMBOL(dynlock_unlock);
++
++int dynlock_is_locked(struct dynlock *dl, unsigned long value)
++{
++	struct dynlock_handle *hl;
++	int result = 0;
++
++	/* find requested lock in lockspace */
++	spin_lock(&dl->dl_list_lock);
++	BUG_ON(dl->dl_list.next == NULL);
++	BUG_ON(dl->dl_list.prev == NULL);
++	list_for_each_entry(hl, &dl->dl_list, dh_list) {
++		BUG_ON(hl->dh_list.next == NULL);
++		BUG_ON(hl->dh_list.prev == NULL);
++		BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
++		if (hl->dh_value == value && hl->dh_pid == current->pid) {
++			/* lock is found */
++			result = 1;
++			break;
++		}
++	}
++	spin_unlock(&dl->dl_list_lock);
++	return result;
++}
++EXPORT_SYMBOL(dynlock_is_locked);
+Index: linux-source-2.6.32/include/linux/dynlocks.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/include/linux/dynlocks.h	2012-06-28 12:10:45.425668386 +0200
+@@ -0,0 +1,34 @@
++#ifndef _LINUX_DYNLOCKS_H
++#define _LINUX_DYNLOCKS_H
++
++#include <linux/list.h>
++#include <linux/wait.h>
++
++struct dynlock_handle;
++
++/*
++ * lock's namespace:
++ *   - list of locks
++ *   - lock to protect this list
++ */
++struct dynlock {
++	unsigned		dl_magic;
++	struct list_head	dl_list;
++	spinlock_t		dl_list_lock;
++};
++
++enum dynlock_type {
++	DLT_WRITE,
++	DLT_READ
++};
++
++int dynlock_cache_init(void);
++void dynlock_cache_exit(void);
++void dynlock_init(struct dynlock *dl);
++struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
++				    enum dynlock_type lt, gfp_t gfp);
++void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
++int dynlock_is_locked(struct dynlock *dl, unsigned long value);
++
++#endif
++
+Index: linux-source-2.6.32/fs/ext4/Makefile
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:09:59.685666701 +0200
++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:10:45.425668386 +0200
+@@ -7,7 +7,7 @@
+ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+ 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
+-		mmp.o
++		mmp.o dynlocks.o
+ 
+ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
+ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:42.285668861 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:45.429666724 +0200
+@@ -4082,32 +4082,37 @@
+ 		return err;
+ 	ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
+ 	if (!ext4_kset)
+-		goto out4;
++		goto out5;
+ 	ext4_proc_root = proc_mkdir("fs/ext4", NULL);
+ 	err = init_ext4_mballoc();
+ 	if (err)
+-		goto out3;
++		goto out4;
+ 
+ 	err = init_ext4_xattr();
+ 	if (err)
+-		goto out2;
++		goto out3;
+ 	err = init_inodecache();
+ 	if (err)
++		goto out2;
++	err = dynlock_cache_init();
++	if (err)
+ 		goto out1;
+ 	err = register_filesystem(&ext4_fs_type);
+ 	if (err)
+ 		goto out;
+ 	return 0;
+ out:
+-	destroy_inodecache();
++	dynlock_cache_exit();
+ out1:
+-	exit_ext4_xattr();
++	destroy_inodecache();
+ out2:
+-	exit_ext4_mballoc();
++	exit_ext4_xattr();
+ out3:
++	exit_ext4_mballoc();
++out4:
+ 	remove_proc_entry("fs/ext4", NULL);
+ 	kset_unregister(ext4_kset);
+-out4:
++out5:
+ 	exit_ext4_system_zone();
+ 	return err;
+ }
+@@ -4115,6 +4120,7 @@
+ static void __exit exit_ext4_fs(void)
+ {
+ 	unregister_filesystem(&ext4_fs_type);
++	dynlock_cache_exit();
+ 	destroy_inodecache();
+ 	exit_ext4_xattr();
+ 	exit_ext4_mballoc();
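For reference, a hedged usage sketch of the dynlock API introduced above:
per-value locking keyed on an arbitrary number such as an inode number. The
demo names are hypothetical; the API calls are those added by the patch.

/* Hedged usage sketch (hypothetical names): lock a single value in a
 * lockspace, do the work, unlock. */
#include <linux/slab.h>
#include <linux/dynlocks.h>

static struct dynlock demo_lockspace;	/* dynlock_init() it at setup time */

static int demo_locked_op(unsigned long resource_id)
{
	struct dynlock_handle *h;

	h = dynlock_lock(&demo_lockspace, resource_id, DLT_WRITE, GFP_NOFS);
	if (h == NULL)
		return -ENOMEM;

	/* ... operate on the resource identified by resource_id ... */

	dynlock_unlock(&demo_lockspace, h);
	return 0;
}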
diff --git a/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch
new file mode 100644
index 0000000..6745d2f
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-export-64bit-name-hash-2.6.32-vanilla.patch
@@ -0,0 +1,134 @@
+Index: linux-source-2.6.32/fs/ext4/dir.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/dir.c	2012-06-28 12:11:16.361665139 +0200
++++ linux-source-2.6.32/fs/ext4/dir.c	2012-06-28 12:11:34.597665360 +0200
+@@ -246,22 +246,50 @@
+ 	return ret;
+ }
+ 
++static inline int is_32bit_api(void)
++{
++#ifdef HAVE_IS_COMPAT_TASK
++        return is_compat_task();
++#else
++        return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+  * These functions convert from the major/minor hash to an f_pos
+  * value.
+  *
+- * Currently we only use major hash numer.  This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie.  Sigh.
++ * The upper layer (OSD) should specify O_32BITHASH or O_64BITHASH explicitly.
++ * On the other hand, we allow ldiskfs to be mounted directly on both 32-bit
++ * and 64-bit nodes; in that case neither O_32BITHASH nor O_64BITHASH is
++ * specified.
+  */
+-#define hash2pos(major, minor)	(major >> 1)
+-#define pos2maj_hash(pos)	((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos)	(0)
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++	if ((filp->f_flags & O_32BITHASH) ||
++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
++		return (major >> 1);
++	else
++		return (((__u64)(major >> 1) << 32) | (__u64)minor);
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++	if ((filp->f_flags & O_32BITHASH) ||
++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
++		return ((pos << 1) & 0xffffffff);
++	else
++		return (((pos >> 32) << 1) & 0xffffffff);
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++	if ((filp->f_flags & O_32BITHASH) ||
++	    (!(filp->f_flags & O_64BITHASH) && is_32bit_api()))
++		return (0);
++	else
++		return (pos & 0xffffffff);
++}
+ 
+ /*
+  * This structure holds the nodes of the red-black tree used to store
+@@ -322,15 +350,16 @@
+ }
+ 
+ 
+-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *
++ext4_htree_create_dir_info(struct file *filp, loff_t pos)
+ {
+ 	struct dir_private_info *p;
+ 
+ 	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ 	if (!p)
+ 		return NULL;
+-	p->curr_hash = pos2maj_hash(pos);
+-	p->curr_minor_hash = pos2min_hash(pos);
++	p->curr_hash = pos2maj_hash(filp, pos);
++	p->curr_minor_hash = pos2min_hash(filp, pos);
+ 	return p;
+ }
+ 
+@@ -426,7 +455,7 @@
+ 		       "null fname?!?\n");
+ 		return 0;
+ 	}
+-	curr_pos = hash2pos(fname->hash, fname->minor_hash);
++	curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ 	while (fname) {
+ 		error = filldir(dirent, fname->name,
+ 				fname->name_len, curr_pos,
+@@ -451,7 +480,7 @@
+ 	int	ret;
+ 
+ 	if (!info) {
+-		info = ext4_htree_create_dir_info(filp->f_pos);
++		info = ext4_htree_create_dir_info(filp, filp->f_pos);
+ 		if (!info)
+ 			return -ENOMEM;
+ 		filp->private_data = info;
+@@ -465,8 +494,8 @@
+ 		free_rb_tree_fname(&info->root);
+ 		info->curr_node = NULL;
+ 		info->extra_fname = NULL;
+-		info->curr_hash = pos2maj_hash(filp->f_pos);
+-		info->curr_minor_hash = pos2min_hash(filp->f_pos);
++		info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++		info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ 	}
+ 
+ 	/*
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:24.109662558 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:34.597665360 +0200
+@@ -808,6 +808,14 @@
+ 	__u64 i_fs_version;
+ };
+ 
++#ifndef O_32BITHASH
++# define O_32BITHASH	0x10000000
++#endif
++
++#ifndef O_64BITHASH
++# define O_64BITHASH	0x20000000
++#endif
++
+ #define HAVE_DISK_INODE_VERSION
+ 
+ /*
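The helpers above pack the major/minor hash pair into a 64-bit f_pos and
back. A self-contained userspace round-trip of the 64-bit branch (the helpers
are re-stated here outside the kernel; note that the low bit of the major
hash is dropped by design, so an even major value round-trips exactly):

/* Standalone check of the 64-bit hash<->pos math above (userspace). */
#include <stdio.h>
#include <stdint.h>

static uint64_t hash2pos64(uint32_t major, uint32_t minor)
{
	return ((uint64_t)(major >> 1) << 32) | minor;
}

static uint32_t pos2maj64(uint64_t pos)
{
	return ((pos >> 32) << 1) & 0xffffffff;
}

static uint32_t pos2min64(uint64_t pos)
{
	return pos & 0xffffffff;
}

int main(void)
{
	uint32_t major = 0xdeadbee2, minor = 0x12345678; /* even major */
	uint64_t pos = hash2pos64(major, minor);

	printf("pos=0x%016llx major=0x%08x minor=0x%08x\n",
	       (unsigned long long)pos, pos2maj64(pos), pos2min64(pos));
	return 0;
}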
diff --git a/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch
new file mode 100644
index 0000000..fc65734
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-ext_generation-2.6.32-vanilla.patch
@@ -0,0 +1,48 @@
+Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:08:35.493670779 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:09:30.485668675 +0200
+@@ -194,6 +194,11 @@
+ 	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
+ }
+ 
++static inline void ext4_ext_tree_changed(struct inode *inode)
++{
++	EXT4_I(inode)->i_ext_generation++;
++}
++
+ static inline void
+ ext4_ext_invalidate_cache(struct inode *inode)
+ {
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:19.417666703 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:30.485668675 +0200
+@@ -756,6 +756,7 @@
+ 	struct inode vfs_inode;
+ 	struct jbd2_inode jinode;
+ 
++	unsigned long i_ext_generation;
+ 	struct ext4_ext_cache i_cached_extent;
+ 	/*
+ 	 * File creation time. Its function is same as that of
+Index: linux-source-2.6.32/fs/ext4/extents.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:08:35.401666340 +0200
++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:09:30.489673456 +0200
+@@ -1752,6 +1752,7 @@
+ 		ext4_ext_drop_refs(npath);
+ 		kfree(npath);
+ 	}
++	ext4_ext_tree_changed(inode);
+ 	ext4_ext_invalidate_cache(inode);
+ 	return err;
+ }
+@@ -2380,6 +2381,7 @@
+ 		}
+ 	}
+ out:
++	ext4_ext_tree_changed(inode);
+ 	ext4_ext_drop_refs(path);
+ 	kfree(path);
+ 	if (err == -EAGAIN)
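i_ext_generation is bumped whenever the extent tree is modified, which gives
consumers a cheap validity check for cached extent lookups. A hedged sketch
of the intended pattern — do_lockless_extent_lookup() is a hypothetical
helper, not something this patchset provides:

/* Hedged sketch: validate a lockless lookup against the generation
 * counter; do_lockless_extent_lookup() is a hypothetical helper. */
static int demo_generation_checked_lookup(struct inode *inode)
{
	unsigned long gen;
	int ret;

	do {
		gen = EXT4_I(inode)->i_ext_generation;
		ret = do_lockless_extent_lookup(inode);
	} while (gen != EXT4_I(inode)->i_ext_generation); /* tree changed */

	return ret;
}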
diff --git a/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch
new file mode 100644
index 0000000..c7f1de8
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-extents-mount-option-2.6.32-vanilla.patch
@@ -0,0 +1,168 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:42.265665313 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:54.877664607 +0200
+@@ -849,6 +849,7 @@
+ #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
+ #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
+ #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
++#define EXT4_MOUNT_EXTENTS		0x400000 /* Extents support */
+ #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
+ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
+ #define EXT4_MOUNT_I_VERSION            0x2000000 /* i_version support */
+Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:23.325664479 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:54.877664607 +0200
+@@ -33,7 +33,7 @@
+ 
+ #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
+ 	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
+-	 ? 27U : 8U)
++	 || test_opt(sb, EXTENTS) ? 27U : 8U)
+ 
+ #define ext4_journal_dirty_metadata(handle, bh)  \
+ 		ext4_handle_dirty_metadata(handle, NULL, bh)
+Index: linux-source-2.6.32/fs/ext4/extents.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:10:23.329669991 +0200
++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:10:54.881664295 +0200
+@@ -2449,7 +2449,7 @@
+ 	 * possible initialization would be here
+ 	 */
+ 
+-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
++	if (test_opt(sb, EXTENTS)) {
+ #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
+ 		printk(KERN_INFO "EXT4-fs: file extents enabled");
+ #ifdef AGGRESSIVE_TEST
+@@ -2476,7 +2476,7 @@
+  */
+ void ext4_ext_release(struct super_block *sb)
+ {
+-	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
++	if (!test_opt(sb, EXTENTS))
+ 		return;
+ 
+ #ifdef EXTENTS_STATS
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:10:30.005662279 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:54.881664295 +0200
+@@ -1047,7 +1047,7 @@
+ 	if (err)
+ 		goto fail_free_drop;
+ 
+-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
++	if (test_opt(sb, EXTENTS)) {
+ 		/* set extent flag only for directory, file and normal symlink*/
+ 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
+ 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
+Index: linux-source-2.6.32/fs/ext4/migrate.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/migrate.c	2012-06-28 12:08:18.565662551 +0200
++++ linux-source-2.6.32/fs/ext4/migrate.c	2012-06-28 12:10:54.881664295 +0200
+@@ -459,13 +459,10 @@
+ 	unsigned long max_entries;
+ 	__u32 goal;
+ 
+-	/*
+-	 * If the filesystem does not support extents, or the inode
+-	 * already is extent-based, error out.
+-	 */
+-	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+-				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
+-	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++	if (!test_opt(inode->i_sb, EXTENTS))
++		/*
++		 * if mounted with noextents we don't allow the migration
++		 */
+ 		return -EINVAL;
+ 
+ 	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:45.429666724 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:54.885674648 +0200
+@@ -895,6 +895,8 @@
+ 		seq_puts(seq, ",journal_checksum");
+ 	if (test_opt(sb, NOBH))
+ 		seq_puts(seq, ",nobh");
++	if (!test_opt(sb, EXTENTS))
++		seq_puts(seq, ",noextents");
+ 	if (test_opt(sb, I_VERSION))
+ 		seq_puts(seq, ",i_version");
+ 	if (!test_opt(sb, DELALLOC))
+@@ -1115,6 +1117,7 @@
+ 	Opt_block_validity, Opt_noblock_validity,
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+ 	Opt_mballoc, Opt_bigendian_extents, Opt_force_over_128tb,
++	Opt_extents, Opt_noextents,
+ 	Opt_discard, Opt_nodiscard,
+ };
+ 
+@@ -1187,6 +1190,8 @@
+ 	{Opt_bigendian_extents, "bigendian_extents"},
+ 	{Opt_force_over_128tb, "force_over_128tb"},
+ 	{Opt_mballoc, "mballoc"},
++	{Opt_extents, "extents"},
++	{Opt_noextents, "noextents"},
+ 	{Opt_discard, "discard"},
+ 	{Opt_nodiscard, "nodiscard"},
+ 	{Opt_err, NULL},
+@@ -1231,6 +1236,7 @@
+ 	int qtype, qfmt;
+ 	char *qname;
+ #endif
++	ext4_fsblk_t last_block;
+ 
+ 	if (!options)
+ 		return 1;
+@@ -1635,6 +1641,32 @@
+ 		case Opt_force_over_128tb:
+ 			force_over_128tb = 1;
+ 			break;
++		case Opt_extents:
++			if (!EXT4_HAS_INCOMPAT_FEATURE(sb,
++					EXT4_FEATURE_INCOMPAT_EXTENTS)) {
++				ext4_warning(sb, "extents feature not enabled "
++						 "on this filesystem, use tune2fs");
++				return 0;
++			}
++			set_opt(sbi->s_mount_opt, EXTENTS);
++			break;
++		case Opt_noextents:
++			/*
++			 * When e2fsprogs supports resizing an already existing
++			 * ext4 file system to greater than 2**32 blocks, we need
++			 * to add support to the block allocator to handle growing
++			 * already existing block-mapped inodes so that blocks
++			 * allocated for them fall within 2**32
++			 */
++			last_block = ext4_blocks_count(sbi->s_es) - 1;
++			if (last_block  > 0xffffffffULL) {
++				printk(KERN_ERR "EXT4-fs: Filesystem too "
++						"large to mount with "
++						"-o noextents options\n");
++				return 0;
++			}
++			clear_opt(sbi->s_mount_opt, EXTENTS);
++			break;
+ 		default:
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "Unrecognized mount option \"%s\" "
+@@ -2499,6 +2531,14 @@
+ 	set_opt(sbi->s_mount_opt, BARRIER);
+ 
+ 	/*
++	 * turn on the extents feature by default in an ext4 filesystem,
++	 * but only if the feature flag was already set by mkfs or tune2fs.
++	 * Use -o noextents to turn it off
++	 */
++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
++		set_opt(sbi->s_mount_opt, EXTENTS);
++
++	/*
+ 	 * enable delayed allocation by default
+ 	 * Use -o nodelalloc to turn it off
+ 	 */
diff --git a/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch
new file mode 100644
index 0000000..c462357
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-fiemap-2.6.32-vanilla.patch
@@ -0,0 +1,111 @@
+This patch adds direct EXT4_IOC_FIEMAP support to ldiskfs, so that Lustre can
+call it without having to go through do_vfs_ioctl() (which isn't exported, and
+has a number of other ioctls which are not suitable for Lustre). The actual
+FIEMAP support is already present in the kernel's ext4 code for normal usage.
+
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:54.877664607 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:58.337671542 +0200
+@@ -473,7 +473,7 @@
+ #define EXT4_IOC_GROUP_ADD		_IOW('f', 8, struct ext4_new_group_input)
+ #define EXT4_IOC_MIGRATE		_IO('f', 9)
+  /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
+- /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
++#define EXT4_IOC_FIEMAP			_IOWR('f', 11, struct fiemap)
+ #define EXT4_IOC_ALLOC_DA_BLKS		_IO('f', 12)
+ #define EXT4_IOC_MOVE_EXT		_IOWR('f', 15, struct move_extent)
+ 
+Index: linux-source-2.6.32/fs/ext4/ioctl.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ioctl.c	2012-06-28 12:08:17.325666867 +0200
++++ linux-source-2.6.32/fs/ext4/ioctl.c	2012-06-28 12:10:58.337671542 +0200
+@@ -18,6 +18,71 @@
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+ 
++/* So that the fiemap access checks can't overflow on 32 bit machines. */
++#define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
++
++static int fiemap_check_ranges(struct super_block *sb,
++			       u64 start, u64 len, u64 *new_len)
++{
++	*new_len = len;
++
++	if (len == 0)
++		return -EINVAL;
++
++	if (start > sb->s_maxbytes)
++		return -EFBIG;
++
++	/*
++	 * Shrink request scope to what the fs can actually handle.
++	 */
++	if ((len > sb->s_maxbytes) ||
++	    (sb->s_maxbytes - len) < start)
++		*new_len = sb->s_maxbytes - start;
++
++	return 0;
++}
++
++int ioctl_fiemap(struct inode *inode, struct file *filp, unsigned long arg)
++{
++	struct fiemap fiemap;
++	u64 len;
++	struct fiemap_extent_info fieinfo = {0, };
++	struct super_block *sb = inode->i_sb;
++	int error = 0;
++
++	if (copy_from_user(&fiemap, (struct fiemap __user *) arg,
++			   sizeof(struct fiemap)))
++		 return -EFAULT;
++
++	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
++		return -EINVAL;
++
++	error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
++				    &len);
++	if (error)
++		return error;
++
++	fieinfo.fi_flags = fiemap.fm_flags;
++	fieinfo.fi_extents_max = fiemap.fm_extent_count;
++	fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
++
++	if (fiemap.fm_extent_count != 0 &&
++	    !access_ok(VERIFY_WRITE, (void *)arg,
++		       offsetof(typeof(fiemap), fm_extents[fiemap.fm_extent_count])))
++		return -EFAULT;
++
++	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
++		filemap_write_and_wait(inode->i_mapping);
++
++	error = ext4_fiemap(inode, &fieinfo, fiemap.fm_start, len);
++	fiemap.fm_flags = fieinfo.fi_flags;
++	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
++	if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
++		error = -EFAULT;
++
++	return error;
++}
++
+ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
+@@ -330,6 +395,9 @@
+ 		mnt_drop_write(filp->f_path.mnt);
+ 		return err;
+ 	}
++	case EXT4_IOC_FIEMAP: {
++		return ioctl_fiemap(inode, filp, arg);
++	}
+ 
+ 	default:
+ 		return -ENOTTY;
+Index: linux-source-2.6.32/fs/ext4/fiemap.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/fs/ext4/fiemap.h	2012-06-28 12:10:58.337671542 +0200
+@@ -0,0 +1,2 @@
++
++#include_next <fiemap.h>
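
A minimal userspace sketch of calling the ioctl added above (illustrative
only: the EXT4_IOC_FIEMAP definition is copied from the hunk, everything else
is the standard <linux/fiemap.h> API):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/fiemap.h>

    #define EXT4_IOC_FIEMAP _IOWR('f', 11, struct fiemap)

    int main(int argc, char **argv)
    {
        struct fiemap *fm;
        unsigned int i;
        int fd;

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;

        /* header plus room for 32 extents */
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        if (fm == NULL)
            return 1;
        fm->fm_start = 0;
        fm->fm_length = ~0ULL;              /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;    /* flush dirty data first */
        fm->fm_extent_count = 32;

        if (ioctl(fd, EXT4_IOC_FIEMAP, fm) < 0) {
            perror("EXT4_IOC_FIEMAP");
            return 1;
        }

        for (i = 0; i < fm->fm_mapped_extents; i++)
            printf("extent %u: logical %llu physical %llu length %llu\n",
                   i, (unsigned long long)fm->fm_extents[i].fe_logical,
                   (unsigned long long)fm->fm_extents[i].fe_physical,
                   (unsigned long long)fm->fm_extents[i].fe_length);
        return 0;
    }
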
diff --git a/ldiskfs/kernel_patches/patches/ext4-force_over_128tb-rhel5.patch b/ldiskfs/kernel_patches/patches/ext4-force_over_128tb-2.6.32-vanilla.patch
similarity index 100%
copy from ldiskfs/kernel_patches/patches/ext4-force_over_128tb-rhel5.patch
copy to ldiskfs/kernel_patches/patches/ext4-force_over_128tb-2.6.32-vanilla.patch
diff --git a/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch
new file mode 100644
index 0000000..8ff4099
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-hash-indexed-dir-dotdot-update-2.6.32-vanilla.patch
@@ -0,0 +1,87 @@
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:42.283981062 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:49.061664012 +0200
+@@ -1526,6 +1526,72 @@
+ 	return retval;
+ }
+ 
++/* update ".." for hash-indexed directory, split the item "." if necessary */
++static int ext4_update_dotdot(handle_t *handle, struct dentry *dentry,
++				 struct inode *inode)
++{
++	struct inode * dir = dentry->d_parent->d_inode;
++	struct buffer_head * dir_block;
++	struct ext4_dir_entry_2 * de;
++	int len, journal = 0, err = 0;
++
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	if (IS_DIRSYNC(dir))
++		handle->h_sync = 1;
++
++	dir_block = ext4_bread(handle, dir, 0, 0, &err);
++	if (!dir_block)
++		goto out;
++
++	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
++	/* the first item must be "." */
++	assert(de->name_len == 1 && de->name[0] == '.');
++	len = le16_to_cpu(de->rec_len);
++	assert(len >= EXT4_DIR_REC_LEN(1));
++	if (len > EXT4_DIR_REC_LEN(1)) {
++		BUFFER_TRACE(dir_block, "get_write_access");
++		err = ext4_journal_get_write_access(handle, dir_block);
++		if (err)
++			goto out_journal;
++
++		journal = 1;
++		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++	}
++
++	len -= EXT4_DIR_REC_LEN(1);
++	assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++	de = (struct ext4_dir_entry_2 *)
++			((char *) de + le16_to_cpu(de->rec_len));
++	if (!journal) {
++		BUFFER_TRACE(dir_block, "get_write_access");
++		err = ext4_journal_get_write_access(handle, dir_block);
++		if (err)
++			goto out_journal;
++	}
++
++	de->inode = cpu_to_le32(inode->i_ino);
++	if (len > 0)
++		de->rec_len = cpu_to_le16(len);
++	else
++		assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++	de->name_len = 2;
++	strcpy (de->name, "..");
++	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++
++out_journal:
++	if (journal) {
++		BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
++		err = ext4_handle_dirty_metadata(handle, dir, dir_block);
++		ext4_mark_inode_dirty(handle, dir);
++	}
++	brelse (dir_block);
++
++out:
++	return err;
++}
++
+ /*
+  *	ext4_add_entry()
+  *
+@@ -1553,6 +1619,9 @@
+ 	if (!dentry->d_name.len)
+ 		return -EINVAL;
+ 	if (is_dx(dir)) {
++		if (dentry->d_name.len == 2 &&
++		    memcmp(dentry->d_name.name, "..", 2) == 0)
++			return ext4_update_dotdot(handle, dentry, inode);
+ 		retval = ext4_dx_add_entry(handle, dentry, inode);
+ 		if (!retval || (retval != ERR_BAD_DX_DIR))
+ 			return retval;
diff --git a/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch
new file mode 100644
index 0000000..3d13724
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-inode-version-2.6.32-vanilla.patch
@@ -0,0 +1,63 @@
+Index: linux-source-2.6.32/fs/ext4/inode.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:09:14.201666378 +0200
++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:09:34.579776667 +0200
+@@ -4985,11 +4985,11 @@
+ 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+ 
+-	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
++	ei->i_fs_version = le32_to_cpu(raw_inode->i_disk_version);
+ 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+-			inode->i_version |=
+-			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
++			ei->i_fs_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi))
++									 << 32;
+ 	}
+ 
+ 	ret = 0;
+@@ -5199,11 +5199,11 @@
+ 		for (block = 0; block < EXT4_N_BLOCKS; block++)
+ 			raw_inode->i_block[block] = ei->i_data[block];
+ 
+-	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
++	raw_inode->i_disk_version = cpu_to_le32(ei->i_fs_version);
+ 	if (ei->i_extra_isize) {
+ 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+-			raw_inode->i_version_hi =
+-			cpu_to_le32(inode->i_version >> 32);
++			raw_inode->i_version_hi = cpu_to_le32(ei->i_fs_version
++							      >> 32);
+ 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
+ 	}
+ 
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:23.393677834 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:34.581668116 +0200
+@@ -1011,6 +1011,7 @@
+ 	ei->i_dtime = 0;
+ 	ei->i_block_group = group;
+ 	ei->i_last_alloc_group = ~0;
++	ei->i_fs_version = 0;
+ 
+ 	ext4_set_inode_flags(inode);
+ 	if (IS_DIRSYNC(inode))
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:30.485668675 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:34.581668116 +0200
+@@ -799,8 +799,12 @@
+ 	 */
+ 	tid_t i_sync_tid;
+ 	tid_t i_datasync_tid;
++
++	__u64 i_fs_version;
+ };
+ 
++#define HAVE_DISK_INODE_VERSION
++
+ /*
+  * File system states
+  */
diff --git a/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch
new file mode 100644
index 0000000..ee6ff80
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-journal-callback-2.6.32-vanilla.patch
@@ -0,0 +1,470 @@
+Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:54.877664607 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:11:44.185852824 +0200
+@@ -106,6 +106,80 @@
+ #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
+ #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
+ 
++/**
++ *   struct ext4_journal_cb_entry - Base structure for callback information.
++ *
++ *   This struct is a 'seed' structure for use with your own callback
++ *   structs. If you are using callbacks you must allocate one of these
++ *   or another struct of your own definition which has this struct
++ *   as its first element, and pass it to ext4_journal_callback_add().
++ */
++struct ext4_journal_cb_entry {
++	/* list information for other callbacks attached to the same handle */
++	struct list_head jce_list;
++
++	/*  Function to call with this callback structure */
++	void (*jce_func)(struct super_block *sb,
++			 struct ext4_journal_cb_entry *jce, int error);
++
++	/* user data goes here */
++};
++
++/**
++ * ext4_journal_callback_add: add a function to call after transaction commit
++ * @handle: active journal transaction handle to register callback on
++ * @func: callback function to call after the transaction has committed:
++ *        @sb: superblock of current filesystem for transaction
++ *        @jce: returned journal callback data
++ *        @rc: journal state at commit (0 = transaction committed properly)
++ * @jce: journal callback data (internal and function private data struct)
++ *
++ * The registered function will be called in the context of the journal thread
++ * after the transaction for which the handle was created has completed.
++ *
++ * No locks are held when the callback function is called, so it is safe to
++ * call blocking functions from within the callback, but the callback should
++ * not block or run for too long, or the filesystem will be blocked waiting for
++ * the next transaction to commit. No journaling functions can be used, or
++ * there is a risk of deadlock.
++ *
++ * There is no guaranteed calling order of multiple registered callbacks on
++ * the same transaction.
++ */
++static inline void ext4_journal_callback_add(handle_t *handle,
++			void (*func)(struct super_block *sb,
++				     struct ext4_journal_cb_entry *jce,
++				     int rc),
++			struct ext4_journal_cb_entry *jce)
++{
++	struct ext4_sb_info *sbi =
++			EXT4_SB(handle->h_transaction->t_journal->j_private);
++
++	/* Add the jce to transaction's private list */
++	jce->jce_func = func;
++	spin_lock(&sbi->s_md_lock);
++	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
++	spin_unlock(&sbi->s_md_lock);
++}
++
++/**
++ * ext4_journal_callback_del: delete a registered callback
++ * @handle: active journal transaction handle on which callback was registered
++ * @jce: registered journal callback entry to unregister
++ */
++static inline void ext4_journal_callback_del(handle_t *handle,
++					     struct ext4_journal_cb_entry *jce)
++{
++	struct ext4_sb_info *sbi =
++			EXT4_SB(handle->h_transaction->t_journal->j_private);
++
++	spin_lock(&sbi->s_md_lock);
++	list_del_init(&jce->jce_list);
++	spin_unlock(&sbi->s_md_lock);
++}
++
++#define HAVE_EXT4_JOURNAL_CALLBACK_ADD
++
+ int
+ ext4_mark_iloc_dirty(handle_t *handle,
+ 		     struct inode *inode,
+Index: linux-source-2.6.32/fs/ext4/mballoc.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:11:12.525665254 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:11:44.185852824 +0200
+@@ -96,23 +96,24 @@
+  */
+ #define MB_DEFAULT_GROUP_PREALLOC	512
+ 
+-
+ struct ext4_free_data {
+-	/* this links the free block information from group_info */
+-	struct rb_node node;
++	/* MUST be the first member */
++	struct ext4_journal_cb_entry	efd_jce;
+ 
+-	/* this links the free block information from ext4_sb_info */
+-	struct list_head list;
++	/* ext4_free_data private data starts from here */
++
++	/* this links the free block information from group_info */
++	struct rb_node		efd_node;
+ 
+ 	/* group which free block extent belongs */
+-	ext4_group_t group;
++	ext4_group_t		efd_group;
+ 
+ 	/* free block extent */
+-	ext4_grpblk_t start_blk;
+-	ext4_grpblk_t count;
++	ext4_grpblk_t		efd_start_blk;
++	ext4_grpblk_t		efd_count;
+ 
+ 	/* transaction which freed this extent */
+-	tid_t	t_tid;
++	tid_t			efd_tid;
+ };
+ 
+ struct ext4_prealloc_space {
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:38.101746741 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:44.193730804 +0200
+@@ -21,6 +21,7 @@
+  * mballoc.c contains the multiblocks allocation routines
+  */
+ 
++#include "ext4_jbd2.h"
+ #include "mballoc.h"
+ #include <linux/debugfs.h>
+ #include <trace/events/ext4.h>
+@@ -336,12 +337,12 @@
+  */
+ static struct kmem_cache *ext4_pspace_cachep;
+ static struct kmem_cache *ext4_ac_cachep;
+-static struct kmem_cache *ext4_free_ext_cachep;
++static struct kmem_cache *ext4_free_data_cachep;
+ static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ 					ext4_group_t group);
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+ 						ext4_group_t group);
+-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
++static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error);
+ 
+ static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
+ {
+@@ -2652,8 +2653,6 @@
+ 		}
+ 	}
+ 
+-	if (sbi->s_journal)
+-		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
+ 	return 0;
+ }
+ 
+@@ -2745,62 +2744,52 @@
+  * This function is called by the jbd2 layer once the commit has finished,
+  * so we know we can free the blocks that were released with that commit.
+  */
+-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
++static void ext4_free_data_callback(struct super_block *sb,
++				   struct ext4_journal_cb_entry *jce,
++				   int rc)
+ {
+-	struct super_block *sb = journal->j_private;
++	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
+ 	struct ext4_buddy e4b;
+ 	struct ext4_group_info *db;
+ 	int err, count = 0, count2 = 0;
+-	struct ext4_free_data *entry;
+-	struct list_head *l, *ltmp;
+-
+-	list_for_each_safe(l, ltmp, &txn->t_private_list) {
+-		entry = list_entry(l, struct ext4_free_data, list);
+ 
+-		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
+-			 entry->count, entry->group, entry);
++	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
++		entry->efd_count, entry->efd_group, entry);
+ 
+-		if (test_opt(sb, DISCARD)) {
+-			int ret;
+-			ext4_fsblk_t discard_block;
+-
+-			discard_block = entry->start_blk +
+-				ext4_group_first_block_no(sb, entry->group);
+-			trace_ext4_discard_blocks(sb,
+-					(unsigned long long)discard_block,
+-					entry->count);
+-			ret = sb_issue_discard(sb, discard_block, entry->count);
+-			if (ret == EOPNOTSUPP) {
+-				ext4_warning(sb, __func__,
+-					"discard not supported, disabling");
+-				clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+-			}
++	if (test_opt(sb, DISCARD)) {
++		int ret;
++		ret = ext4_issue_discard(sb, entry->efd_group,
++			entry->efd_start_blk, entry->efd_count);
++		if (unlikely(ret == -EOPNOTSUPP)) {
++			ext4_warning(sb, "discard not supported, "
++					"disabling");
++			clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+ 		}
++	}
+ 
+-		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
+-		/* we expect to find existing buddy because it's pinned */
+-		BUG_ON(err != 0);
+-
+-		db = e4b.bd_info;
+-		/* there are blocks to put in buddy to make them really free */
+-		count += entry->count;
+-		count2++;
+-		ext4_lock_group(sb, entry->group);
+-		/* Take it out of per group rb tree */
+-		rb_erase(&entry->node, &(db->bb_free_root));
+-		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
+-
+-		if (!db->bb_free_root.rb_node) {
+-			/* No more items in the per group rb tree
+-			 * balance refcounts from ext4_mb_free_metadata()
+-			 */
+-			page_cache_release(e4b.bd_buddy_page);
+-			page_cache_release(e4b.bd_bitmap_page);
+-		}
+-		ext4_unlock_group(sb, entry->group);
+-		kmem_cache_free(ext4_free_ext_cachep, entry);
+-		ext4_mb_unload_buddy(&e4b);
++	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
++	/* we expect to find existing buddy because it's pinned */
++	BUG_ON(err != 0);
++
++	db = e4b.bd_info;
++	/* there are blocks to put in buddy to make them really free */
++	count += entry->efd_count;
++	count2++;
++	ext4_lock_group(sb, entry->efd_group);
++	/* Take it out of per group rb tree */
++	rb_erase(&entry->efd_node, &(db->bb_free_root));
++	mb_free_blocks(NULL, &e4b, entry->efd_start_blk, entry->efd_count);
++
++	if (!db->bb_free_root.rb_node) {
++		/* No more items in the per group rb tree
++		 * balance refcounts from ext4_mb_free_metadata()
++		 */
++		page_cache_release(e4b.bd_buddy_page);
++		page_cache_release(e4b.bd_bitmap_page);
+ 	}
++	ext4_unlock_group(sb, entry->efd_group);
++	kmem_cache_free(ext4_free_data_cachep, entry);
++	ext4_mb_release_desc(&e4b);
+ 
+ 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
+ }
+@@ -2852,22 +2841,22 @@
+ 		kmem_cache_create("ext4_alloc_context",
+ 				     sizeof(struct ext4_allocation_context),
+ 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
+-	if (ext4_ac_cachep == NULL) {
+-		kmem_cache_destroy(ext4_pspace_cachep);
+-		return -ENOMEM;
+-	}
++	if (ext4_ac_cachep == NULL)
++		goto out_err;
++
++	ext4_free_data_cachep =
++		KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT);
++	if (ext4_free_data_cachep == NULL)
++		goto out1_err;
+ 
+-	ext4_free_ext_cachep =
+-		kmem_cache_create("ext4_free_block_extents",
+-				     sizeof(struct ext4_free_data),
+-				     0, SLAB_RECLAIM_ACCOUNT, NULL);
+-	if (ext4_free_ext_cachep == NULL) {
+-		kmem_cache_destroy(ext4_pspace_cachep);
+-		kmem_cache_destroy(ext4_ac_cachep);
+-		return -ENOMEM;
+-	}
+ 	ext4_create_debugfs_entry();
+ 	return 0;
++
++out1_err:
++	kmem_cache_destroy(ext4_ac_cachep);
++out_err:
++	kmem_cache_destroy(ext4_pspace_cachep);
++	return -ENOMEM;
+ }
+ 
+ void exit_ext4_mballoc(void)
+@@ -2879,7 +2868,7 @@
+ 	rcu_barrier();
+ 	kmem_cache_destroy(ext4_pspace_cachep);
+ 	kmem_cache_destroy(ext4_ac_cachep);
+-	kmem_cache_destroy(ext4_free_ext_cachep);
++	kmem_cache_destroy(ext4_free_data_cachep);
+ 	ext4_remove_debugfs_entry();
+ }
+ 
+@@ -3421,8 +3410,8 @@
+ 	n = rb_first(&(grp->bb_free_root));
+ 
+ 	while (n) {
+-		entry = rb_entry(n, struct ext4_free_data, node);
+-		mb_set_bits(bitmap, entry->start_blk, entry->count);
++		entry = rb_entry(n, struct ext4_free_data, efd_node);
++		mb_set_bits(bitmap, entry->efd_start_blk, entry->efd_count);
+ 		n = rb_next(n);
+ 	}
+ 	return;
+@@ -4666,11 +4655,11 @@
+  * AND the blocks are associated with the same group.
+  */
+ static int can_merge(struct ext4_free_data *entry1,
+-			struct ext4_free_data *entry2)
++		     struct ext4_free_data *entry2)
+ {
+-	if ((entry1->t_tid == entry2->t_tid) &&
+-	    (entry1->group == entry2->group) &&
+-	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
++	if ((entry1->efd_tid == entry2->efd_tid) &&
++	    (entry1->efd_group == entry2->efd_group) &&
++	    ((entry1->efd_start_blk + entry1->efd_count) == entry2->efd_start_blk))
+ 		return 1;
+ 	return 0;
+ }
+@@ -4683,7 +4672,6 @@
+ 	struct ext4_free_data *entry;
+ 	struct ext4_group_info *db = e4b->bd_info;
+ 	struct super_block *sb = e4b->bd_sb;
+-	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
+ 	struct rb_node *parent = NULL, *new_node;
+ 
+@@ -4691,8 +4679,8 @@
+ 	BUG_ON(e4b->bd_bitmap_page == NULL);
+ 	BUG_ON(e4b->bd_buddy_page == NULL);
+ 
+-	new_node = &new_entry->node;
+-	block = new_entry->start_blk;
++	new_node = &new_entry->efd_node;
++	block = new_entry->efd_start_blk;
+ 
+ 	if (!*n) {
+ 		/* first free block exent. We need to
+@@ -4705,15 +4693,15 @@
+ 	}
+ 	while (*n) {
+ 		parent = *n;
+-		entry = rb_entry(parent, struct ext4_free_data, node);
+-		if (block < entry->start_blk)
++		entry = rb_entry(parent, struct ext4_free_data, efd_node);
++		if (block < entry->efd_start_blk)
+ 			n = &(*n)->rb_left;
+-		else if (block >= (entry->start_blk + entry->count))
++		else if (block >= (entry->efd_start_blk + entry->efd_count))
+ 			n = &(*n)->rb_right;
+ 		else {
+ 			ext4_grp_locked_error(sb, e4b->bd_group, __func__,
+ 					"Double free of blocks %d (%d %d)",
+-					block, entry->start_blk, entry->count);
++					block, entry->efd_start_blk, entry->efd_count);
+ 			return 0;
+ 		}
+ 	}
+@@ -4724,34 +4712,29 @@
+ 	/* Now try to see the extent can be merged to left and right */
+ 	node = rb_prev(new_node);
+ 	if (node) {
+-		entry = rb_entry(node, struct ext4_free_data, node);
++		entry = rb_entry(node, struct ext4_free_data, efd_node);
+ 		if (can_merge(entry, new_entry)) {
+-			new_entry->start_blk = entry->start_blk;
+-			new_entry->count += entry->count;
++			new_entry->efd_start_blk = entry->efd_start_blk;
++			new_entry->efd_count += entry->efd_count;
+ 			rb_erase(node, &(db->bb_free_root));
+-			spin_lock(&sbi->s_md_lock);
+-			list_del(&entry->list);
+-			spin_unlock(&sbi->s_md_lock);
+-			kmem_cache_free(ext4_free_ext_cachep, entry);
++			ext4_journal_callback_del(handle, &entry->efd_jce);
++			kmem_cache_free(ext4_free_data_cachep, entry);
+ 		}
+ 	}
+ 
+ 	node = rb_next(new_node);
+ 	if (node) {
+-		entry = rb_entry(node, struct ext4_free_data, node);
++		entry = rb_entry(node, struct ext4_free_data, efd_node);
+ 		if (can_merge(new_entry, entry)) {
+-			new_entry->count += entry->count;
++			new_entry->efd_count += entry->efd_count;
+ 			rb_erase(node, &(db->bb_free_root));
+-			spin_lock(&sbi->s_md_lock);
+-			list_del(&entry->list);
+-			spin_unlock(&sbi->s_md_lock);
+-			kmem_cache_free(ext4_free_ext_cachep, entry);
++			ext4_journal_callback_del(handle, &entry->efd_jce);
++			kmem_cache_free(ext4_free_data_cachep, entry);
+ 		}
+ 	}
+ 	/* Add the extent to transaction's private list */
+-	spin_lock(&sbi->s_md_lock);
+-	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
+-	spin_unlock(&sbi->s_md_lock);
++	ext4_journal_callback_add(handle, ext4_free_data_callback,
++				  &new_entry->efd_jce);
+ 	return 0;
+ }
+ 
+@@ -4872,11 +4855,11 @@
+ 		 * blocks being freed are metadata. these blocks shouldn't
+ 		 * be used until this transaction is committed
+ 		 */
+-		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+-		new_entry->start_blk = bit;
+-		new_entry->group  = block_group;
+-		new_entry->count = count;
+-		new_entry->t_tid = handle->h_transaction->t_tid;
++		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
++		new_entry->efd_start_blk = bit;
++		new_entry->efd_group  = block_group;
++		new_entry->efd_count = count;
++		new_entry->efd_tid = handle->h_transaction->t_tid;
+ 
+ 		ext4_lock_group(sb, block_group);
+ 		mb_clear_bits(bitmap_bh->b_data, bit, count);
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:38.097666318 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:44.197733192 +0200
+@@ -301,6 +301,23 @@
+ 
+ EXPORT_SYMBOL(ext4_journal_abort_handle);
+ 
++static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
++{
++	struct super_block		*sb = journal->j_private;
++	struct ext4_sb_info		*sbi = EXT4_SB(sb);
++	int				error = is_journal_aborted(journal);
++	struct ext4_journal_cb_entry	*jce, *tmp;
++
++	spin_lock(&sbi->s_md_lock);
++	list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
++		list_del_init(&jce->jce_list);
++		spin_unlock(&sbi->s_md_lock);
++		jce->jce_func(sb, jce, error);
++		spin_lock(&sbi->s_md_lock);
++	}
++	spin_unlock(&sbi->s_md_lock);
++}
++
+ /* Deal with the reporting of failure conditions on a filesystem such as
+  * inconsistencies detected or read IO failures.
+  *
+@@ -2979,6 +2996,8 @@
+ 	}
+ 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+ 
++	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
++
+ no_journal:
+ 	err = percpu_counter_init(&sbi->s_freeblocks_counter,
+ 				  ext4_count_free_blocks(sb));
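
To illustrate the API this patch introduces, a hypothetical in-kernel user
(my_commit_data, my_commit_done and my_do_work are made-up names for the
sketch) embeds the jce as the first member of its own struct, exactly as the
comment in ext4_jbd2.h requires:

    struct my_commit_data {
        struct ext4_journal_cb_entry mcd_jce;   /* MUST be first */
        unsigned long mcd_block;                /* private payload */
    };

    /* runs in the journal thread once the transaction has committed;
     * error != 0 means the journal was aborted */
    static void my_commit_done(struct super_block *sb,
                               struct ext4_journal_cb_entry *jce, int error)
    {
        struct my_commit_data *mcd = (struct my_commit_data *)jce;

        pr_info("block %lu committed, rc %d\n", mcd->mcd_block, error);
        kfree(mcd);
    }

    static int my_do_work(handle_t *handle, unsigned long block)
    {
        struct my_commit_data *mcd;

        mcd = kmalloc(sizeof(*mcd), GFP_NOFS);
        if (mcd == NULL)
            return -ENOMEM;
        mcd->mcd_block = block;

        /* callback fires after this handle's transaction commits */
        ext4_journal_callback_add(handle, my_commit_done, &mcd->mcd_jce);
        return 0;
    }
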
diff --git a/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch
new file mode 100644
index 0000000..fa4aa0d
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-kill-dx_root-2.6.32-vanilla.patch
@@ -0,0 +1,237 @@
+Removes the static definition of the dx_root struct so that the "." and ".."
+dirents can have extra data. This patch does not change any functionality,
+but is required for the ext4_data_in_dirent patch.
+ 
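+For reference, the resulting layout of directory block 0 (which the new
+dx_get_dx_info() helper below walks) is roughly:
+
+    +---------------------------+  offset 0
+    | "." dirent                |  EXT4_DIR_REC_LEN(1) bytes
+    +---------------------------+
+    | ".." dirent               |  EXT4_DIR_REC_LEN(2) bytes
+    +---------------------------+
+    | struct dx_root_info       |  8 bytes
+    +---------------------------+
+    | struct dx_entry entries[] |  up to dx_root_limit() entries
+    +---------------------------+
+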
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:49.061664012 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:52.005665948 +0200
+@@ -114,22 +114,13 @@
+  * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
+  */
+ 
+-struct dx_root
++struct dx_root_info
+ {
+-	struct fake_dirent dot;
+-	char dot_name[4];
+-	struct fake_dirent dotdot;
+-	char dotdot_name[4];
+-	struct dx_root_info
+-	{
+-		__le32 reserved_zero;
+-		u8 hash_version;
+-		u8 info_length; /* 8 */
+-		u8 indirect_levels;
+-		u8 unused_flags;
+-	}
+-	info;
+-	struct dx_entry	entries[0];
++	__le32 reserved_zero;
++	u8 hash_version;
++	u8 info_length; /* 8 */
++	u8 indirect_levels;
++	u8 unused_flags;
+ };
+ 
+ struct dx_node
+@@ -243,6 +234,16 @@
+  * Future: use high four bits of block for coalesce-on-delete flags
+  * Mask them off for now.
+  */
++struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
++{
++	/* get dotdot first */
++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++
++	/* dx root info is after dotdot entry */
++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++
++	return (struct dx_root_info *)de;
++}
+ 
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+@@ -397,7 +398,7 @@
+ {
+ 	unsigned count, indirect;
+ 	struct dx_entry *at, *entries, *p, *q, *m;
+-	struct dx_root *root;
++	struct dx_root_info * info;
+ 	struct buffer_head *bh;
+ 	struct dx_frame *frame = frame_in;
+ 	u32 hash;
+@@ -405,18 +406,18 @@
+ 	frame->bh = NULL;
+ 	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
+ 		goto fail;
+-	root = (struct dx_root *) bh->b_data;
+-	if (root->info.hash_version != DX_HASH_TEA &&
+-	    root->info.hash_version != DX_HASH_HALF_MD4 &&
+-	    root->info.hash_version != DX_HASH_LEGACY) {
++	info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
++	if (info->hash_version != DX_HASH_TEA &&
++	    info->hash_version != DX_HASH_HALF_MD4 &&
++	    info->hash_version != DX_HASH_LEGACY) {
+ 		ext4_warning(dir->i_sb, __func__,
+ 			     "Unrecognised inode hash code %d for directory "
+-                            "#%lu", root->info.hash_version, dir->i_ino);
++			     "#%lu", info->hash_version, dir->i_ino);
+ 		brelse(bh);
+ 		*err = ERR_BAD_DX_DIR;
+ 		goto fail;
+ 	}
+-	hinfo->hash_version = root->info.hash_version;
++	hinfo->hash_version = info->hash_version;
+ 	if (hinfo->hash_version <= DX_HASH_TEA)
+ 		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+ 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -424,29 +425,28 @@
+ 		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+ 	hash = hinfo->hash;
+ 
+-	if (root->info.unused_flags & 1) {
++	if (info->unused_flags & 1) {
+ 		ext4_warning(dir->i_sb, __func__,
+ 			     "Unimplemented inode hash flags: %#06x",
+-			     root->info.unused_flags);
++			     info->unused_flags);
+ 		brelse(bh);
+ 		*err = ERR_BAD_DX_DIR;
+ 		goto fail;
+ 	}
+ 
+-	if ((indirect = root->info.indirect_levels) > 1) {
++	if ((indirect = info->indirect_levels) > 1) {
+ 		ext4_warning(dir->i_sb, __func__,
+ 			     "Unimplemented inode hash depth: %#06x",
+-			     root->info.indirect_levels);
++			     info->indirect_levels);
+ 		brelse(bh);
+ 		*err = ERR_BAD_DX_DIR;
+ 		goto fail;
+ 	}
+ 
+-	entries = (struct dx_entry *) (((char *)&root->info) +
+-				       root->info.info_length);
++	entries = (struct dx_entry *) (((char *)info) + info->info_length);
+ 
+ 	if (dx_get_limit(entries) != dx_root_limit(dir,
+-						   root->info.info_length)) {
++						   info->info_length)) {
+ 		ext4_warning(dir->i_sb, __func__,
+ 			     "dx entry: limit != root limit");
+ 		brelse(bh);
+@@ -528,10 +528,12 @@
+ 
+ static void dx_release (struct dx_frame *frames)
+ {
++	struct dx_root_info *info;
+ 	if (frames[0].bh == NULL)
+ 		return;
+ 
+-	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
++	info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
++	if (info->indirect_levels)
+ 		brelse(frames[1].bh);
+ 	brelse(frames[0].bh);
+ }
+@@ -1442,17 +1444,16 @@
+ 	const char	*name = dentry->d_name.name;
+ 	int		namelen = dentry->d_name.len;
+ 	struct buffer_head *bh2;
+-	struct dx_root	*root;
+ 	struct dx_frame	frames[2], *frame;
+ 	struct dx_entry *entries;
+-	struct ext4_dir_entry_2	*de, *de2;
++	struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+ 	char		*data1, *top;
+ 	unsigned	len;
+ 	int		retval;
+ 	unsigned	blocksize;
+ 	struct dx_hash_info hinfo;
+ 	ext4_lblk_t  block;
+-	struct fake_dirent *fde;
++	struct dx_root_info *dx_info;
+ 
+ 	blocksize =  dir->i_sb->s_blocksize;
+ 	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
+@@ -1462,20 +1463,20 @@
+ 		brelse(bh);
+ 		return retval;
+ 	}
+-	root = (struct dx_root *) bh->b_data;
++	dot_de = (struct ext4_dir_entry_2 *) bh->b_data;
++	dotdot_de = ext4_next_entry(dot_de, blocksize);
+ 
+ 	/* The 0th block becomes the root, move the dirents out */
+-	fde = &root->dotdot;
+-	de = (struct ext4_dir_entry_2 *)((char *)fde +
+-		ext4_rec_len_from_disk(fde->rec_len, blocksize));
+-	if ((char *) de >= (((char *) root) + blocksize)) {
++	de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
++		ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
++	if ((char *) de >= (((char *) dot_de) + blocksize)) {
+ 		ext4_error(dir->i_sb, __func__,
+ 			   "invalid rec_len for '..' in inode %lu",
+ 			   dir->i_ino);
+ 		brelse(bh);
+ 		return -EIO;
+ 	}
+-	len = ((char *) root) + blocksize - (char *) de;
++	len = ((char *) dot_de) + blocksize - (char *) de;
+ 
+ 	/* Allocate new block for the 0th block's dirents */
+ 	bh2 = ext4_append(handle, dir, &block, &retval);
+@@ -1494,19 +1495,23 @@
+ 	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+ 					   blocksize);
+ 	/* Initialize the root; the dot dirents already exist */
+-	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
+-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
+-					   blocksize);
+-	memset (&root->info, 0, sizeof(root->info));
+-	root->info.info_length = sizeof(root->info);
+-	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+-	entries = root->entries;
++	dotdot_de->rec_len = ext4_rec_len_to_disk(blocksize -
++			le16_to_cpu(dot_de->rec_len), blocksize);
++
++	/* initialize hashing info */
++	dx_info = dx_get_dx_info(dot_de);
++	memset (dx_info, 0, sizeof(*dx_info));
++	dx_info->info_length = sizeof(*dx_info);
++	dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++
++	entries = (void *)dx_info + sizeof(*dx_info);
++
+ 	dx_set_block(entries, 1);
+ 	dx_set_count(entries, 1);
+-	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
++	dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
+ 
+ 	/* Initialize as for dx_probe */
+-	hinfo.hash_version = root->info.hash_version;
++	hinfo.hash_version = dx_info->hash_version;
+ 	if (hinfo.hash_version <= DX_HASH_TEA)
+ 		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+ 	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -1756,6 +1761,7 @@
+ 				goto journal_error;
+ 			brelse (bh2);
+ 		} else {
++			struct dx_root_info * info;
+ 			dxtrace(printk(KERN_DEBUG
+ 				       "Creating second level index...\n"));
+ 			memcpy((char *) entries2, (char *) entries,
+@@ -1765,7 +1771,9 @@
+ 			/* Set up root */
+ 			dx_set_count(entries, 1);
+ 			dx_set_block(entries + 0, newblock);
+-			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++			info = dx_get_dx_info((struct ext4_dir_entry_2*)
++					frames[0].bh->b_data);
++			info->indirect_levels = 1;
+ 
+ 			/* Add new access path frame */
+ 			frame = frames + 1;
diff --git a/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch
new file mode 100644
index 0000000..443a153
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-large-eas-2.6.32-vanilla.patch
@@ -0,0 +1,736 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:16.361665139 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:20.317665268 +0200
+@@ -1258,6 +1258,7 @@
+ #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
+ #define EXT4_FEATURE_INCOMPAT_MMP               0x0100
+ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
++#define EXT4_FEATURE_INCOMPAT_EA_INODE		0x0400
+ #define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
+ 
+ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
+@@ -1267,6 +1268,7 @@
+ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
+ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
+ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
++					 EXT4_FEATURE_INCOMPAT_EA_INODE| \
+ 					 EXT4_FEATURE_INCOMPAT_MMP| \
+ 					 EXT4_FEATURE_INCOMPAT_DIRDATA)
+ 
+@@ -1565,6 +1567,12 @@
+ #endif
+ 
+ /*
++ * Maximum size of xattr attributes for FEATURE_INCOMPAT_EA_INODE: 1MB.
++ * This limit is arbitrary, but is reasonable for the xattr API.
++ */
++#define EXT4_XATTR_MAX_LARGE_EA_SIZE    (1024 * 1024)
++
++/*
+  * Function prototypes
+  */
+ 
+Index: linux-source-2.6.32/fs/ext4/xattr.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:10:12.481658638 +0200
++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:20.321664768 +0200
+@@ -168,19 +168,26 @@
+ }
+ 
+ static inline int
+-ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
++ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size,
++		       struct inode *inode)
+ {
+ 	size_t value_size = le32_to_cpu(entry->e_value_size);
+ 
+-	if (entry->e_value_block != 0 || value_size > size ||
+-	    le16_to_cpu(entry->e_value_offs) + value_size > size)
++	if ((entry->e_value_inum == 0) &&
++	   (le16_to_cpu(entry->e_value_offs) + value_size > size))
++		return -EIO;
++	if (entry->e_value_inum != 0 &&
++	    (le32_to_cpu(entry->e_value_inum) < EXT4_FIRST_INO(inode->i_sb) ||
++	     le32_to_cpu(entry->e_value_inum) >
++	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_inodes_count)))
+ 		return -EIO;
+ 	return 0;
+ }
+ 
+ static int
+ ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
+-		      const char *name, size_t size, int sorted)
++		      const char *name, size_t size, int sorted,
++		      struct inode *inode)
+ {
+ 	struct ext4_xattr_entry *entry;
+ 	size_t name_len;
+@@ -200,11 +207,103 @@
+ 			break;
+ 	}
+ 	*pentry = entry;
+-	if (!cmp && ext4_xattr_check_entry(entry, size))
++	if (!cmp && ext4_xattr_check_entry(entry, size, inode))
+ 			return -EIO;
+ 	return cmp ? -ENODATA : 0;
+ }
+ 
++/*
++ * Read the EA value from an inode.
++ */
++static int
++ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t *size)
++{
++	unsigned long block = 0;
++	struct buffer_head *bh = NULL;
++	int err, blocksize;
++	size_t csize, ret_size = 0;
++
++	if (*size == 0)
++		return 0;
++
++	blocksize = ea_inode->i_sb->s_blocksize;
++
++	while (ret_size < *size) {
++		csize = (*size - ret_size) > blocksize ? blocksize :
++							*size - ret_size;
++		bh = ext4_bread(NULL, ea_inode, block, 0, &err);
++		if (!bh) {
++			*size = ret_size;
++			return err;
++		}
++		memcpy(buf, bh->b_data, csize);
++		brelse(bh);
++
++		buf += csize;
++		block += 1;
++		ret_size += csize;
++	}
++
++	*size = ret_size;
++
++	return err;
++}
++
++struct inode *ext4_xattr_inode_iget(struct inode *parent, int ea_ino, int *err)
++{
++	struct inode *ea_inode = NULL;
++
++	ea_inode = ext4_iget(parent->i_sb, ea_ino);
++	if (ea_inode == NULL || is_bad_inode(ea_inode)) {
++		ext4_error(parent->i_sb, "error while reading EA inode %d",
++			   ea_ino);
++		*err = -EIO;
++		return NULL;
++	}
++
++	if (ea_inode->i_xattr_inode_parent != parent->i_ino ||
++	    ea_inode->i_generation != parent->i_generation) {
++		ext4_error(parent->i_sb, "Backpointer from EA inode %d "
++			   "to parent invalid.", ea_ino);
++		*err = -EINVAL;
++		goto error;
++	}
++
++	if (!(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL)) {
++		ext4_error(parent->i_sb, "EA inode %d does not have "
++			   "EXT4_EA_INODE_FL flag set.\n", ea_ino);
++		*err = -EINVAL;
++		goto error;
++	}
++
++	*err = 0;
++	return ea_inode;
++
++error:
++	iput(ea_inode);
++	return NULL;
++}
++
++/*
++ * Read the value from the EA inode.
++ */
++static int
++ext4_xattr_inode_get(struct inode *inode, int ea_ino, void *buffer,
++		     size_t *size)
++{
++	struct inode *ea_inode = NULL;
++	int err;
++
++	ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
++	if (err)
++		return err;
++
++	err = ext4_xattr_inode_read(ea_inode, buffer, size);
++	iput(ea_inode);
++
++	return err;
++}
++
+ static int
+ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ 		     void *buffer, size_t buffer_size)
+@@ -235,7 +334,8 @@
+ 	}
+ 	ext4_xattr_cache_insert(bh);
+ 	entry = BFIRST(bh);
+-	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
++	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1,
++				      inode);
+ 	if (error == -EIO)
+ 		goto bad_block;
+ 	if (error)
+@@ -245,8 +345,16 @@
+ 		error = -ERANGE;
+ 		if (size > buffer_size)
+ 			goto cleanup;
+-		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
+-		       size);
++		if (entry->e_value_inum != 0) {
++			error = ext4_xattr_inode_get(inode,
++					     le32_to_cpu(entry->e_value_inum),
++					     buffer, &size);
++			if (error)
++				goto cleanup;
++		} else {
++			memcpy(buffer, bh->b_data +
++			       le16_to_cpu(entry->e_value_offs), size);
++		}
+ 	}
+ 	error = size;
+ 
+@@ -280,7 +388,7 @@
+ 	if (error)
+ 		goto cleanup;
+ 	error = ext4_xattr_find_entry(&entry, name_index, name,
+-				      end - (void *)entry, 0);
++				      end - (void *)entry, 0, inode);
+ 	if (error)
+ 		goto cleanup;
+ 	size = le32_to_cpu(entry->e_value_size);
+@@ -288,8 +396,16 @@
+ 		error = -ERANGE;
+ 		if (size > buffer_size)
+ 			goto cleanup;
+-		memcpy(buffer, (void *)IFIRST(header) +
+-		       le16_to_cpu(entry->e_value_offs), size);
++		if (entry->e_value_inum != 0) {
++			error = ext4_xattr_inode_get(inode,
++					     le32_to_cpu(entry->e_value_inum),
++					     buffer, &size);
++			if (error)
++				goto cleanup;
++		} else {
++			memcpy(buffer, (void *)IFIRST(header) +
++			       le16_to_cpu(entry->e_value_offs), size);
++		}
+ 	}
+ 	error = size;
+ 
+@@ -511,7 +627,7 @@
+ {
+ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ 		*total += EXT4_XATTR_LEN(last->e_name_len);
+-		if (!last->e_value_block && last->e_value_size) {
++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ 			size_t offs = le16_to_cpu(last->e_value_offs);
+ 			if (offs < *min_offs)
+ 				*min_offs = offs;
+@@ -520,11 +636,159 @@
+ 	return (*min_offs - ((void *)last - base) - sizeof(__u32));
+ }
+ 
++/*
++ * Write the value of the EA in an inode.
++ */
++static int
++ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
++		       const void *buf, int bufsize)
++{
++	struct buffer_head *bh = NULL, dummy;
++	unsigned long block = 0;
++	unsigned blocksize = ea_inode->i_sb->s_blocksize;
++	unsigned max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
++	int csize, wsize = 0;
++	int ret = 0;
++	int retries = 0;
++
++retry:
++	while (ret >= 0 && ret < max_blocks) {
++		block += ret;
++		max_blocks -= ret;
++
++		ret = ext4_get_blocks(handle, ea_inode, block, max_blocks,
++				      &dummy, EXT4_GET_BLOCKS_CREATE);
++		if (ret <= 0) {
++			ext4_mark_inode_dirty(handle, ea_inode);
++			if (ret == -ENOSPC &&
++			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
++				ret = 0;
++				goto retry;
++			}
++			break;
++		}
++	}
++
++	if (ret < 0)
++		return ret;
++
++	block = 0;
++	while (wsize < bufsize) {
++		if (bh != NULL)
++			brelse(bh);
++		csize = (bufsize - wsize) > blocksize ? blocksize :
++								bufsize - wsize;
++		bh = ext4_getblk(handle, ea_inode, block, 0, &ret);
++		if (!bh)
++			goto out;
++		ret = ext4_journal_get_write_access(handle, bh);
++		if (ret)
++			goto out;
++
++		memcpy(bh->b_data, buf, csize);
++		set_buffer_uptodate(bh);
++		ext4_journal_dirty_metadata(handle, bh);
++
++		buf += csize;
++		wsize += csize;
++		block += 1;
++	}
++
++	i_size_write(ea_inode, wsize);
++	ext4_update_i_disksize(ea_inode, wsize);
++
++	ext4_mark_inode_dirty(handle, ea_inode);
++
++out:
++	brelse(bh);
++
++	return ret;
++}
++
++/*
++ * Create an inode to store the value of a large EA.
++ */
++static struct inode *
++ext4_xattr_inode_create(handle_t *handle, struct inode *inode)
++{
++	struct inode *ea_inode = NULL;
++
++	/*
++	 * Let the next inode be the goal, so we try to allocate the EA inode
++	 * in the same group, or a nearby one.
++	 */
++	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
++				  S_IFREG|0600, NULL, inode->i_ino + 1);
++
++	if (!IS_ERR(ea_inode)) {
++		ea_inode->i_op = &ext4_file_inode_operations;
++		ea_inode->i_fop = &ext4_file_operations;
++		ext4_set_aops(ea_inode);
++		ea_inode->i_generation = inode->i_generation;
++		EXT4_I(ea_inode)->i_flags |= EXT4_EA_INODE_FL;
++
++		/*
++		 * A back-pointer from EA inode to parent inode will be useful
++		 * for e2fsck.
++		 */
++		ea_inode->i_xattr_inode_parent = inode->i_ino;
++		unlock_new_inode(ea_inode);
++	}
++
++	return ea_inode;
++}
++
++/*
++ * Unlink the inode storing the value of the EA.
++ */
++static int
++ext4_xattr_inode_unlink(struct inode *inode, int ea_ino)
++{
++	struct inode *ea_inode = NULL;
++	int err;
++
++	ea_inode = ext4_xattr_inode_iget(inode, ea_ino, &err);
++	if (err)
++		return err;
++
++	ea_inode->i_nlink = 0;
++	iput(ea_inode);
++
++	return 0;
++}
++
++/*
++ * Store the value of an EA in a separate inode.
++ */
++static int
++ext4_xattr_inode_set(handle_t *handle, struct inode *inode, int *ea_ino,
++		     const void *value, size_t value_len)
++{
++	struct inode *ea_inode = NULL;
++	int err;
++
++	/* Create an inode for the EA value */
++	ea_inode = ext4_xattr_inode_create(handle, inode);
++	if (IS_ERR(ea_inode))
++		return -1;
++
++	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
++	if (err)
++		ea_inode->i_nlink = 0;
++	else
++		*ea_ino = ea_inode->i_ino;
++
++	iput(ea_inode);
++
++	return err;
++}
++
+ struct ext4_xattr_info {
+-	int name_index;
+ 	const char *name;
+ 	const void *value;
+ 	size_t value_len;
++	int name_index;
++	int in_inode;
+ };
+ 
+ struct ext4_xattr_search {
+@@ -536,15 +800,23 @@
+ };
+ 
+ static int
+-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
++		     handle_t *handle, struct inode *inode)
+ {
+ 	struct ext4_xattr_entry *last;
+ 	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
++	int in_inode = i->in_inode;
++
++	if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
++		 EXT4_FEATURE_INCOMPAT_EA_INODE) &&
++	    (EXT4_XATTR_SIZE(i->value_len) >
++	     EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
++		in_inode = 1;
+ 
+ 	/* Compute min_offs and last. */
+ 	last = s->first;
+ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+-		if (!last->e_value_block && last->e_value_size) {
++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ 			size_t offs = le16_to_cpu(last->e_value_offs);
+ 			if (offs < min_offs)
+ 				min_offs = offs;
+@@ -552,16 +824,21 @@
+ 	}
+ 	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
+ 	if (!s->not_found) {
+-		if (!s->here->e_value_block && s->here->e_value_size) {
++		if (!in_inode && s->here->e_value_inum == 0 &&
++		    s->here->e_value_size > 0) {
+ 			size_t size = le32_to_cpu(s->here->e_value_size);
+ 			free += EXT4_XATTR_SIZE(size);
+ 		}
+ 		free += EXT4_XATTR_LEN(name_len);
+ 	}
+ 	if (i->value) {
+-		if (free < EXT4_XATTR_SIZE(i->value_len) ||
+-		    free < EXT4_XATTR_LEN(name_len) +
+-			   EXT4_XATTR_SIZE(i->value_len))
++		size_t value_len = EXT4_XATTR_SIZE(i->value_len);
++
++		if (in_inode)
++			value_len = 0;
++
++		if (free < value_len ||
++		    free < EXT4_XATTR_LEN(name_len) + value_len)
+ 			return -ENOSPC;
+ 	}
+ 
+@@ -575,7 +852,8 @@
+ 		s->here->e_name_len = name_len;
+ 		memcpy(s->here->e_name, i->name, name_len);
+ 	} else {
+-		if (!s->here->e_value_block && s->here->e_value_size) {
++		if (s->here->e_value_offs > 0 && s->here->e_value_inum == 0 &&
++		    s->here->e_value_size > 0) {
+ 			void *first_val = s->base + min_offs;
+ 			size_t offs = le16_to_cpu(s->here->e_value_offs);
+ 			void *val = s->base + offs;
+@@ -604,13 +882,17 @@
+ 			last = s->first;
+ 			while (!IS_LAST_ENTRY(last)) {
+ 				size_t o = le16_to_cpu(last->e_value_offs);
+-				if (!last->e_value_block &&
+-				    last->e_value_size && o < offs)
++				if (last->e_value_size > 0 && o < offs)
+ 					last->e_value_offs =
+ 						cpu_to_le16(o + size);
+ 				last = EXT4_XATTR_NEXT(last);
+ 			}
+ 		}
++		if (s->here->e_value_inum != 0) {
++			ext4_xattr_inode_unlink(inode,
++					le32_to_cpu(s->here->e_value_inum));
++			s->here->e_value_inum = 0;
++		}
+ 		if (!i->value) {
+ 			/* Remove the old name. */
+ 			size_t size = EXT4_XATTR_LEN(name_len);
+@@ -624,10 +906,17 @@
+ 	if (i->value) {
+ 		/* Insert the new value. */
+ 		s->here->e_value_size = cpu_to_le32(i->value_len);
+-		if (i->value_len) {
++		if (in_inode) {
++			int ea_ino = le32_to_cpu(s->here->e_value_inum);
++			ext4_xattr_inode_set(handle, inode, &ea_ino, i->value,
++					     i->value_len);
++			s->here->e_value_inum = cpu_to_le32(ea_ino);
++			s->here->e_value_offs = 0;
++		} else if (i->value_len) {
+ 			size_t size = EXT4_XATTR_SIZE(i->value_len);
+ 			void *val = s->base + min_offs - size;
+ 			s->here->e_value_offs = cpu_to_le16(min_offs - size);
++			s->here->e_value_inum = 0;
+ 			memset(val + size - EXT4_XATTR_PAD, 0,
+ 			       EXT4_XATTR_PAD); /* Clear the pad bytes. */
+ 			memcpy(val, i->value, i->value_len);
+@@ -673,7 +962,7 @@
+ 		bs->s.end = bs->bh->b_data + bs->bh->b_size;
+ 		bs->s.here = bs->s.first;
+ 		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
+-					      i->name, bs->bh->b_size, 1);
++					     i->name, bs->bh->b_size, 1, inode);
+ 		if (error && error != -ENODATA)
+ 			goto cleanup;
+ 		bs->s.not_found = error;
+@@ -697,8 +986,6 @@
+ 
+ #define header(x) ((struct ext4_xattr_header *)(x))
+ 
+-	if (i->value && i->value_len > sb->s_blocksize)
+-		return -ENOSPC;
+ 	if (s->base) {
+ 		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+ 					bs->bh->b_blocknr);
+@@ -713,7 +1000,7 @@
+ 				ce = NULL;
+ 			}
+ 			ea_bdebug(bs->bh, "modifying in-place");
+-			error = ext4_xattr_set_entry(i, s);
++			error = ext4_xattr_set_entry(i, s, handle, inode);
+ 			if (!error) {
+ 				if (!IS_LAST_ENTRY(s->first))
+ 					ext4_xattr_rehash(header(s->base),
+@@ -765,7 +1052,7 @@
+ 		s->end = s->base + sb->s_blocksize;
+ 	}
+ 
+-	error = ext4_xattr_set_entry(i, s);
++	error = ext4_xattr_set_entry(i, s, handle, inode);
+ 	if (error == -EIO)
+ 		goto bad_block;
+ 	if (error)
+@@ -910,7 +1197,7 @@
+ 		/* Find the named attribute. */
+ 		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
+ 					      i->name, is->s.end -
+-					      (void *)is->s.base, 0);
++					      (void *)is->s.base, 0, inode);
+ 		if (error && error != -ENODATA)
+ 			return error;
+ 		is->s.not_found = error;
+@@ -929,7 +1216,7 @@
+ 
+ 	if (EXT4_I(inode)->i_extra_isize == 0)
+ 		return -ENOSPC;
+-	error = ext4_xattr_set_entry(i, s);
++	error = ext4_xattr_set_entry(i, s, handle, inode);
+ 	if (error)
+ 		return error;
+ 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
+@@ -965,7 +1252,7 @@
+ 		.name = name,
+ 		.value = value,
+ 		.value_len = value_len,
+-
++		.in_inode = 0,
+ 	};
+ 	struct ext4_xattr_ibody_find is = {
+ 		.s = { .not_found = -ENODATA, },
+@@ -1034,6 +1321,15 @@
+ 					goto cleanup;
+ 			}
+ 			error = ext4_xattr_block_set(handle, inode, &i, &bs);
++			if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
++					EXT4_FEATURE_INCOMPAT_EA_INODE) &&
++			    error == -ENOSPC) {
++				/* xattr not fit to block, store at external
++				/* xattr does not fit in the block; store it
++				 * in an external inode */
++				error = ext4_xattr_ibody_set(handle, inode,
++							     &i, &is);
++			}
+ 			if (error)
+ 				goto cleanup;
+ 			if (!is.s.not_found) {
+@@ -1081,10 +1377,25 @@
+ 	       const void *value, size_t value_len, int flags)
+ {
+ 	handle_t *handle;
++	struct super_block *sb = inode->i_sb;
++	int buffer_credits;
+ 	int error, retries = 0;
+ 
++	buffer_credits = EXT4_DATA_TRANS_BLOCKS(sb);
++	if ((value_len >= EXT4_XATTR_MIN_LARGE_EA_SIZE(sb->s_blocksize)) &&
++	    EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EA_INODE)) {
++		int nrblocks = (value_len + sb->s_blocksize - 1) >>
++					sb->s_blocksize_bits;
++
++		/* For new inode */
++		buffer_credits += EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + 3;
++
++		/* For data blocks of EA inode */
++		buffer_credits += ext4_meta_trans_blocks(inode, nrblocks, 0);
++	}
++
+ retry:
+-	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
++	handle = ext4_journal_start(inode, buffer_credits);
+ 	if (IS_ERR(handle)) {
+ 		error = PTR_ERR(handle);
+ 	} else {
+@@ -1094,7 +1405,7 @@
+ 					      value, value_len, flags);
+ 		error2 = ext4_journal_stop(handle);
+ 		if (error == -ENOSPC &&
+-		    ext4_should_retry_alloc(inode->i_sb, &retries))
++		    ext4_should_retry_alloc(sb, &retries))
+ 			goto retry;
+ 		if (error == 0)
+ 			error = error2;
+@@ -1116,7 +1427,7 @@
+ 
+ 	/* Adjust the value offsets of the entries */
+ 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+-		if (!last->e_value_block && last->e_value_size) {
++		if (last->e_value_inum == 0 && last->e_value_size > 0) {
+ 			new_offs = le16_to_cpu(last->e_value_offs) +
+ 							value_offs_shift;
+ 			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
+@@ -1355,15 +1666,41 @@
+ /*
+  * ext4_xattr_delete_inode()
+  *
+- * Free extended attribute resources associated with this inode. This
++ * Free extended attribute resources associated with this inode. Traverse
++ * all entries and unlink any xattr inodes associated with this inode. This
+  * is called immediately before an inode is freed. We have exclusive
+- * access to the inode.
++ * access to the inode. If an orphan inode is deleted it will also delete any
++ * xattr block and all xattr inodes. They are checked by ext4_xattr_inode_iget()
++ * to ensure they belong to the parent inode and were not deleted already.
+  */
+ void
+ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
+ {
+ 	struct buffer_head *bh = NULL;
++	struct ext4_xattr_ibody_header *header;
++	struct ext4_inode *raw_inode;
++	struct ext4_iloc iloc;
++	struct ext4_xattr_entry *entry;
++	int error;
++
++	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
++		goto delete_external_ea;
++
++	error = ext4_get_inode_loc(inode, &iloc);
++	if (error)
++		goto cleanup;
++	raw_inode = ext4_raw_inode(&iloc);
++	header = IHDR(inode, raw_inode);
++	entry = IFIRST(header);
++	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++		if (entry->e_value_inum != 0) {
++			ext4_xattr_inode_unlink(inode,
++					le32_to_cpu(entry->e_value_inum));
++			entry->e_value_inum = 0;
++		}
++	}
+ 
++delete_external_ea:
+ 	if (!EXT4_I(inode)->i_file_acl)
+ 		goto cleanup;
+ 	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+@@ -1380,6 +1717,16 @@
+ 			EXT4_I(inode)->i_file_acl);
+ 		goto cleanup;
+ 	}
++
++	entry = BFIRST(bh);
++	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++		if (entry->e_value_inum != 0) {
++			ext4_xattr_inode_unlink(inode,
++					le32_to_cpu(entry->e_value_inum));
++			entry->e_value_inum = 0;
++		}
++	}
++
+ 	ext4_xattr_release_block(handle, inode, bh);
+ 	EXT4_I(inode)->i_file_acl = 0;
+ 
+@@ -1454,10 +1801,9 @@
+ 		    entry1->e_name_index != entry2->e_name_index ||
+ 		    entry1->e_name_len != entry2->e_name_len ||
+ 		    entry1->e_value_size != entry2->e_value_size ||
++		    entry1->e_value_inum != entry2->e_value_inum ||
+ 		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
+ 			return 1;
+-		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
+-			return -EIO;
+ 		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
+ 			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
+ 			   le32_to_cpu(entry1->e_value_size)))
+@@ -1542,7 +1888,7 @@
+ 		       *name++;
+ 	}
+ 
+-	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
++	if (entry->e_value_inum == 0 && entry->e_value_size != 0) {
+ 		__le32 *value = (__le32 *)((char *)header +
+ 			le16_to_cpu(entry->e_value_offs));
+ 		for (n = (le32_to_cpu(entry->e_value_size) +
+Index: linux-source-2.6.32/fs/ext4/xattr.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/xattr.h	2012-06-28 12:08:14.717669056 +0200
++++ linux-source-2.6.32/fs/ext4/xattr.h	2012-06-28 12:11:20.321664768 +0200
+@@ -38,7 +38,7 @@
+ 	__u8	e_name_len;	/* length of name */
+ 	__u8	e_name_index;	/* attribute name index */
+ 	__le16	e_value_offs;	/* offset in disk block of value */
+-	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
++	__le32	e_value_inum;	/* inode in which the value is stored */
+ 	__le32	e_value_size;	/* size of attribute value */
+ 	__le32	e_hash;		/* hash value of name and value */
+ 	char	e_name[0];	/* attribute name */
+@@ -63,6 +63,15 @@
+ 		EXT4_I(inode)->i_extra_isize))
+ #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+ 
++#define i_xattr_inode_parent i_mtime.tv_sec
++
++/*
++ * The minimum EA value size at which the value is stored in an external
++ * inode: size of block - size of header - size of 1 entry - 4 null bytes
++ */
++#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b)					\
++	((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
++
+ # ifdef CONFIG_EXT4_FS_XATTR
+ 
+ extern struct xattr_handler ext4_xattr_user_handler;
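
From userspace nothing changes except capacity: with the EA_INODE feature
enabled, a value larger than one filesystem block can now be stored. A
minimal sketch (user.bigea is an arbitrary attribute name; plain setxattr(2)
is all that is needed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        size_t len = 64 * 1024;         /* > blocksize, < the 1MB cap above */
        char *value = malloc(len);

        if (argc != 2 || value == NULL)
            return 1;
        memset(value, 'x', len);

        /* without EA_INODE, a value this large fails with ENOSPC */
        if (setxattr(argv[1], "user.bigea", value, len, 0) < 0) {
            perror("setxattr");
            return 1;
        }
        free(value);
        return 0;
    }
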
diff --git a/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch
new file mode 100644
index 0000000..70c0f57
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-lookup-dotdot-2.6.32-vanilla.patch
@@ -0,0 +1,43 @@
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:09:27.373665875 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:03.433668774 +0200
+@@ -1098,6 +1098,38 @@
+ 			}
+ 		}
+ 	}
++	/* ".." shouldn't go into the dcache, to preserve the dcache
++	 * hierarchy; otherwise we'll get the parent being a child of the
++	 * actual child. See bug 10458 for details. -bzzz */
++	if (inode && (dentry->d_name.name[0] == '.' && (dentry->d_name.len == 1 ||
++		(dentry->d_name.len == 2 && dentry->d_name.name[1] == '.')))) {
++		struct dentry *tmp, *goal = NULL;
++		struct list_head *lp;
++
++		/* first, look for an existing dentry - any one is good */
++		spin_lock(&dcache_lock);
++		list_for_each(lp, &inode->i_dentry) {
++			tmp = list_entry(lp, struct dentry, d_alias);
++			goal = tmp;
++			dget_locked(goal);
++			break;
++		}
++		if (goal == NULL) {
++			/* there is no alias, we need to make current dentry:
++			 *  a) inaccessible for __d_lookup()
++			 *  b) inaccessible for iopen */
++			J_ASSERT(list_empty(&dentry->d_alias));
++			dentry->d_flags |= DCACHE_NFSFS_RENAMED;
++			/* this is d_instantiate() ... */
++			list_add(&dentry->d_alias, &inode->i_dentry);
++			dentry->d_inode = inode;
++		}
++		spin_unlock(&dcache_lock);
++		if (goal)
++			iput(inode);
++		return goal;
++	}
++
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
diff --git a/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch
new file mode 100644
index 0000000..f260449
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-map_inode_page-2.6.32-vanilla.patch
@@ -0,0 +1,87 @@
+Index: linux-source-2.6.32/fs/ext4/inode.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:08:38.957670045 +0200
++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:09:14.201666378 +0200
+@@ -5905,3 +5905,67 @@
+ 	up_read(&inode->i_alloc_sem);
+ 	return ret;
+ }
++
++int ext4_map_inode_page(struct inode *inode, struct page *page,
++			unsigned long *blocks, int *created, int create)
++{
++	unsigned int blocksize, blocks_per_page;
++	unsigned long iblock;
++	struct buffer_head dummy;
++	void *handle;
++	int i, rc = 0, failed = 0, needed_blocks;
++
++	blocksize = inode->i_sb->s_blocksize;
++	blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
++	iblock = page->index * blocks_per_page;
++
++	for (i = 0; i < blocks_per_page; i++, iblock++) {
++		blocks[i] = ext4_bmap(inode->i_mapping, iblock);
++		if (blocks[i] == 0) {
++			failed++;
++			if (created)
++				created[i] = -1;
++		} else if (created) {
++			created[i] = 0;
++		}
++	}
++
++	if (failed == 0 || create == 0)
++		return 0;
++
++	needed_blocks = ext4_writepage_trans_blocks(inode);
++	handle = ext4_journal_start(inode, needed_blocks);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	iblock = page->index * blocks_per_page;
++	for (i = 0; i < blocks_per_page; i++, iblock++) {
++		if (blocks[i] != 0)
++			continue;
++
++		rc = ext4_ind_get_blocks(handle, inode, iblock, 1, &dummy,
++					 EXT4_GET_BLOCKS_CREATE);
++		if (rc < 0) {
++			printk(KERN_INFO "ext4_map_inode_page: error reading "
++					"block %lu\n", iblock);
++			goto out;
++		} else {
++			if (rc > 1)
++				WARN_ON(1);
++			rc = 0;
++		}
++		/* Unmap any metadata buffers from the block mapping, to avoid
++		 * data corruption due to direct-write from Lustre being
++		 * clobbered by a later flush of the blockdev metadata buffer.*/
++		if (buffer_new(&dummy))
++			unmap_underlying_metadata(dummy.b_bdev,
++					dummy.b_blocknr);
++		blocks[i] = dummy.b_blocknr;
++		if (created)
++			created[i] = 1;
++	}
++
++out:
++	ext4_journal_stop(handle);
++	return rc;
++}
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:08:39.101667098 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:14.205662995 +0200
+@@ -4052,6 +4052,10 @@
+ 	exit_ext4_system_zone();
+ }
+ 
++int ext4_map_inode_page(struct inode *inode, struct page *page,
++			unsigned long *blocks, int *created, int create);
++EXPORT_SYMBOL(ext4_map_inode_page);
++
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Fourth Extended Filesystem");
+ MODULE_LICENSE("GPL");
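+
+Since the export is the whole point of this patch, a caller sketch may be
+useful. This is hypothetical glue (Lustre's direct-I/O path is the real
+consumer), assuming a blocksize of at least 512 bytes:
+
+    unsigned long blocks[PAGE_SIZE >> 9];   /* >= blocks_per_page */
+    int created[PAGE_SIZE >> 9];
+    int rc;
+
+    /* map (and, with create != 0, allocate) every block backing @page;
+     * on success blocks[i] holds the physical block number and
+     * created[i] is 1 for freshly allocated blocks, 0 for existing ones */
+    rc = ext4_map_inode_page(inode, page, blocks, created, 1);
+    if (rc < 0)
+            return rc;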
diff --git a/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch
new file mode 100644
index 0000000..2a0d595
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-max-dir-size-2.6.32-vanilla.patch
@@ -0,0 +1,67 @@
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:34.581668116 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:10:06.381677398 +0200
+@@ -818,11 +818,15 @@
+ 	sb = dir->i_sb;
+ 	ngroups = ext4_get_groups_count(sb);
+ 	trace_ext4_request_inode(dir, mode);
++
++	sbi = EXT4_SB(sb);
++	if (sbi->s_max_dir_size > 0 && i_size_read(dir) >= sbi->s_max_dir_size)
++		return ERR_PTR(-EFBIG);
++
+ 	inode = new_inode(sb);
+ 	if (!inode)
+ 		return ERR_PTR(-ENOMEM);
+ 	ei = EXT4_I(inode);
+-	sbi = EXT4_SB(sb);
+ 
+ 	if (!goal)
+ 		goal = sbi->s_inode_goal;
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:59.693721980 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:06.381677398 +0200
+@@ -2239,6 +2239,7 @@
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
+ 		 inode_readahead_blks_store, s_inode_readahead_blks);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
++EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+@@ -2253,6 +2254,7 @@
+ 	ATTR_LIST(lifetime_write_kbytes),
+ 	ATTR_LIST(inode_readahead_blks),
+ 	ATTR_LIST(inode_goal),
++	ATTR_LIST(max_dir_size),
+ 	ATTR_LIST(mb_stats),
+ 	ATTR_LIST(mb_max_to_scan),
+ 	ATTR_LIST(mb_min_to_scan),
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:59.689673296 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:06.385666114 +0200
+@@ -1107,6 +1107,8 @@
+ 	unsigned int s_log_groups_per_flex;
+ 	struct flex_groups *s_flex_groups;
+ 
++	unsigned long s_max_dir_size;
++
+ 	/* workqueue for dio unwritten */
+ 	struct workqueue_struct *dio_unwritten_wq;
+ 
+@@ -1495,6 +1497,12 @@
+ #define EXT4_MMP_MAX_CHECK_INTERVAL    300UL
+ 
+ /*
++ * max directory size tunable
++ */
++#define EXT4_DEFAULT_MAX_DIR_SIZE	0
++#define EXT4_MAX_DIR_SIZE_NAME		"max_dir_size"
++
++/*
+  * Function prototypes
+  */
+ 
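+
+The three hunks above wire the limit end-to-end: a new s_max_dir_size field in
+the in-memory superblock, a read-write sysfs attribute to set it, and an
+-EFBIG check in the inode allocator before any block is touched. Usage sketch
+(the device name is illustrative):
+
+    echo 10485760 > /sys/fs/ext4/sdb1/max_dir_size
+
+caps every directory on that filesystem at 10 MiB of directory blocks, since
+the check compares i_size_read(dir) in bytes; writing 0 (the default) disables
+the check entirely.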
diff --git a/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch
new file mode 100644
index 0000000..19f6b57
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-2.6.32-vanilla.patch
@@ -0,0 +1,317 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:15.325667336 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:19.989662510 +0200
+@@ -1850,6 +1850,7 @@
+ 	ext4_grpblk_t	bb_fragments;	/* nr of freespace fragments */
+ 	ext4_grpblk_t	bb_largest_free_order;/* order of largest frag in BG */
+ 	struct          list_head bb_prealloc_list;
++	unsigned long   bb_prealloc_nr;
+ #ifdef DOUBLE_CHECK
+ 	void            *bb_bitmap;
+ #endif
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:10:15.333660398 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:19.997669436 +0200
+@@ -337,7 +337,7 @@
+ static struct kmem_cache *ext4_pspace_cachep;
+ static struct kmem_cache *ext4_ac_cachep;
+ static struct kmem_cache *ext4_free_ext_cachep;
+-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ 					ext4_group_t group);
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+ 						ext4_group_t group);
+@@ -680,7 +680,7 @@
+ }
+ 
+ static noinline_for_stack
+-void ext4_mb_generate_buddy(struct super_block *sb,
++int ext4_mb_generate_buddy(struct super_block *sb,
+ 				void *buddy, void *bitmap, ext4_group_t group)
+ {
+ 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+@@ -712,14 +712,13 @@
+ 	grp->bb_fragments = fragments;
+ 
+ 	if (free != grp->bb_free) {
+-		ext4_grp_locked_error(sb, group,  __func__,
+-			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
+-			group, free, grp->bb_free);
+-		/*
+-		 * If we intent to continue, we consider group descritor
+-		 * corrupt and update bb_free using bitmap value
+-		 */
+-		grp->bb_free = free;
++		struct ext4_group_desc *gdp;
++		gdp = ext4_get_group_desc(sb, group, NULL);
++		ext4_error(sb, "group %lu: %u blocks in bitmap, %u in bb, "
++			"%u in gd, %lu pa's\n", (long unsigned int)group,
++			free, grp->bb_free, ext4_free_blks_count(sb, gdp),
++			grp->bb_prealloc_nr);
++		return -EIO;
+ 	}
+ 	mb_set_largest_free_order(sb, grp);
+ 
+@@ -730,6 +729,8 @@
+ 	EXT4_SB(sb)->s_mb_buddies_generated++;
+ 	EXT4_SB(sb)->s_mb_generation_time += period;
+ 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
++
++	return 0;
+ }
+ 
+ /* The buddy information is attached the buddy cache inode
+@@ -864,7 +865,7 @@
+ 	first_block = page->index * blocks_per_page;
+ 	/* init the page  */
+ 	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+-	for (i = 0; i < blocks_per_page; i++) {
++	for (i = 0; i < blocks_per_page && err == 0; i++) {
+ 		int group;
+ 		struct ext4_group_info *grinfo;
+ 
+@@ -899,7 +900,7 @@
+ 			 * incore got set to the group block bitmap below
+ 			 */
+ 			ext4_lock_group(sb, group);
+-			ext4_mb_generate_buddy(sb, data, incore, group);
++			err = ext4_mb_generate_buddy(sb, data, incore, group);
+ 			ext4_unlock_group(sb, group);
+ 			incore = NULL;
+ 		} else {
+@@ -913,7 +914,7 @@
+ 			memcpy(data, bitmap, blocksize);
+ 
+ 			/* mark all preallocated blks used in in-core bitmap */
+-			ext4_mb_generate_from_pa(sb, data, group);
++			err = ext4_mb_generate_from_pa(sb, data, group);
+ 			ext4_mb_generate_from_freelist(sb, data, group);
+ 			ext4_unlock_group(sb, group);
+ 
+@@ -923,7 +924,8 @@
+ 			incore = data;
+ 		}
+ 	}
+-	SetPageUptodate(page);
++	if (likely(err == 0))
++		SetPageUptodate(page);
+ 
+ out:
+ 	if (bh) {
+@@ -2184,9 +2186,11 @@
+ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ {
+ 	struct super_block *sb = seq->private;
++	struct ext4_group_desc *gdp;
+ 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
+ 	int i;
+ 	int err;
++	int free = 0;
+ 	struct ext4_buddy e4b;
+ 	struct sg {
+ 		struct ext4_group_info info;
+@@ -2195,10 +2199,10 @@
+ 
+ 	group--;
+ 	if (group == 0)
+-		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
++		seq_printf(seq, "#%-5s: %-5s %-5s %-5s %-5s %-5s "
+ 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
+ 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
+-			   "group", "free", "frags", "first",
++			   "group", "free", "free", "frags", "first", "pa",
+ 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
+ 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
+ 
+@@ -2209,13 +2213,20 @@
+ 		seq_printf(seq, "#%-5u: I/O error\n", group);
+ 		return 0;
+ 	}
++
++	gdp = ext4_get_group_desc(sb, group, NULL);
++	if (gdp != NULL)
++		free = ext4_free_blks_count(sb, gdp);
++
+ 	ext4_lock_group(sb, group);
+ 	memcpy(&sg, ext4_get_group_info(sb, group), i);
+ 	ext4_unlock_group(sb, group);
+ 	ext4_mb_unload_buddy(&e4b);
+ 
+-	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
+-			sg.info.bb_fragments, sg.info.bb_first_free);
++	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
++			(long unsigned int)group, sg.info.bb_free, free,
++			sg.info.bb_fragments, sg.info.bb_first_free,
++			sg.info.bb_prealloc_nr);
+ 	for (i = 0; i <= 13; i++)
+ 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
+ 				sg.info.bb_counters[i] : 0);
+@@ -3398,23 +3409,68 @@
+ }
+ 
+ /*
++ * check free blocks in bitmap match free block in group descriptor
++ * do this before taking preallocated blocks into account to be able
++ * to detect on-disk corruptions. The group lock should be held by the
++ * caller.
++ */
++int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
++				struct ext4_group_desc *gdp, int group)
++{
++	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
++	unsigned short i, first, free = 0;
++
++	i = mb_find_next_zero_bit(bitmap, max, 0);
++
++	while (i < max) {
++		first = i;
++		i = mb_find_next_bit(bitmap, max, i);
++		if (i > max)
++			i = max;
++		free += i - first;
++		if (i < max)
++			i = mb_find_next_zero_bit(bitmap, max, i);
++	}
++
++	if (free != ext4_free_blks_count(sb, gdp)) {
++		ext4_error(sb, "on-disk bitmap for group %d "
++			"corrupted: %u blocks free in bitmap, %u - in gd\n",
++			group, free, ext4_free_blks_count(sb, gdp));
++		return -EIO;
++	}
++	return 0;
++}
++
++/*
+  * the function goes through all preallocation in this group and marks them
+  * used in in-core bitmap. buddy must be generated from this bitmap
+  * Need to be called with ext4 group lock held
+  */
+ static noinline_for_stack
+-void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ 					ext4_group_t group)
+ {
+ 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+ 	struct ext4_prealloc_space *pa;
++	struct ext4_group_desc *gdp;
+ 	struct list_head *cur;
+ 	ext4_group_t groupnr;
+ 	ext4_grpblk_t start;
+ 	int preallocated = 0;
+ 	int count = 0;
++	int skip = 0;
++	int err;
+ 	int len;
+ 
++	gdp = ext4_get_group_desc(sb, group, NULL);
++	if (gdp == NULL)
++		return -EIO;
++
++	/* before applying preallocations, check bitmap consistency */
++	err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
++	if (err)
++		return err;
++
+ 	/* all form of preallocation discards first load group,
+ 	 * so the only competing code is preallocation use.
+ 	 * we don't need any locking here
+@@ -3430,14 +3486,23 @@
+ 					     &groupnr, &start);
+ 		len = pa->pa_len;
+ 		spin_unlock(&pa->pa_lock);
+-		if (unlikely(len == 0))
++		if (unlikely(len == 0)) {
++			skip++;
+ 			continue;
++		}
+ 		BUG_ON(groupnr != group);
+ 		mb_set_bits(bitmap, start, len);
+ 		preallocated += len;
+ 		count++;
+ 	}
++	if (count + skip != grp->bb_prealloc_nr) {
++		ext4_error(sb, "lost preallocations: "
++			   "count %d, bb_prealloc_nr %lu, skip %d\n",
++			   count, grp->bb_prealloc_nr, skip);
++		return -EIO;
++	}
+ 	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
++	return 0;
+ }
+ 
+ static void ext4_mb_pa_callback(struct rcu_head *head)
+@@ -3496,6 +3561,7 @@
+ 	 */
+ 	ext4_lock_group(sb, grp);
+ 	list_del(&pa->pa_group_list);
++	ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
+ 	ext4_unlock_group(sb, grp);
+ 
+ 	spin_lock(pa->pa_obj_lock);
+@@ -3587,6 +3653,7 @@
+ 
+ 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+ 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++	grp->bb_prealloc_nr++;
+ 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+ 
+ 	spin_lock(pa->pa_obj_lock);
+@@ -3648,6 +3715,7 @@
+ 
+ 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+ 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++	grp->bb_prealloc_nr++;
+ 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+ 
+ 	/*
+@@ -3848,6 +3916,8 @@
+ 
+ 		spin_unlock(&pa->pa_lock);
+ 
++		BUG_ON(grp->bb_prealloc_nr == 0);
++		grp->bb_prealloc_nr--;
+ 		list_del(&pa->pa_group_list);
+ 		list_add(&pa->u.pa_tmp_list, &list);
+ 	}
+@@ -3988,7 +4058,7 @@
+ 		if (err) {
+ 			ext4_error(sb, __func__, "Error in loading buddy "
+ 					"information for %u", group);
+-			continue;
++			return;
+ 		}
+ 
+ 		bitmap_bh = ext4_read_block_bitmap(sb, group);
+@@ -4000,6 +4070,8 @@
+ 		}
+ 
+ 		ext4_lock_group(sb, group);
++		BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
++		e4b.bd_info->bb_prealloc_nr--;
+ 		list_del(&pa->pa_group_list);
+ 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+ 		ext4_unlock_group(sb, group);
+@@ -4273,6 +4345,7 @@
+ 		}
+ 		ext4_lock_group(sb, group);
+ 		list_del(&pa->pa_group_list);
++		ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+ 		ext4_mb_release_group_pa(&e4b, pa, ac);
+ 		ext4_unlock_group(sb, group);
+ 
+Index: linux-source-2.6.32/fs/ext4/mballoc.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:08:28.261665077 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:10:19.997669436 +0200
+@@ -88,7 +88,7 @@
+ /*
+  * for which requests use 2^N search using buddies
+  */
+-#define MB_DEFAULT_ORDER2_REQS		2
++#define MB_DEFAULT_ORDER2_REQS		8
+ 
+ /*
+  * default group prealloc size 512 blocks
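+
+Taken together, the hunks above turn what used to be silent repairs into hard
+failures. The invariants now enforced (restating the patch, not adding to it):
+
+    /* 1. free blocks in the on-disk bitmap must equal the group
+     *    descriptor's count (ext4_mb_check_ondisk_bitmap);
+     * 2. PAs found on bb_prealloc_list, plus skipped zero-length ones,
+     *    must equal bb_prealloc_nr (ext4_mb_generate_from_pa);
+     * 3. any mismatch fails buddy generation with -EIO instead of
+     *    quietly overwriting bb_free with the bitmap value. */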
diff --git a/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch
new file mode 100644
index 0000000..0349db6
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-mballoc-pa_free-mismatch-2.6.32-vanilla.patch
@@ -0,0 +1,111 @@
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:06.761669638 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:12.521665249 +0200
+@@ -3637,6 +3637,7 @@
+ 	INIT_LIST_HEAD(&pa->pa_group_list);
+ 	pa->pa_deleted = 0;
+ 	pa->pa_type = MB_INODE_PA;
++	pa->pa_error = 0;
+ 
+ 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
+ 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+@@ -3698,6 +3699,7 @@
+ 	INIT_LIST_HEAD(&pa->pa_group_list);
+ 	pa->pa_deleted = 0;
+ 	pa->pa_type = MB_GROUP_PA;
++	pa->pa_error = 0;
+ 
+ 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
+ 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+@@ -3760,7 +3762,9 @@
+ 	int err = 0;
+ 	int free = 0;
+ 
++	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ 	BUG_ON(pa->pa_deleted == 0);
++	BUG_ON(pa->pa_inode == NULL);
+ 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+ 	grp_blk_start = pa->pa_pstart - bit;
+ 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+@@ -3796,19 +3800,27 @@
+ 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
+ 		bit = next + 1;
+ 	}
+-	if (free != pa->pa_free) {
+-		printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
+-			pa, (unsigned long) pa->pa_lstart,
+-			(unsigned long) pa->pa_pstart,
+-			(unsigned long) pa->pa_len);
++
++	/* "free < pa->pa_free" means we may have double-allocated the same
++	 * blocks; otherwise we merely left some free blocks unavailable - no need to BUG. */
++	if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
++		ext4_error(sb, "pa free mismatch: [pa %p] "
++				"[phy %lu] [logic %lu] [len %u] [free %u] "
++				"[error %u] [inode %lu] [freed %u]", pa,
++				(unsigned long)pa->pa_pstart,
++				(unsigned long)pa->pa_lstart,
++				(unsigned)pa->pa_len, (unsigned)pa->pa_free,
++				(unsigned)pa->pa_error, pa->pa_inode->i_ino,
++				free);
+ 		ext4_grp_locked_error(sb, group,
+-					__func__, "free %u, pa_free %u",
+-					free, pa->pa_free);
++				__func__, "free %u, pa_free %u",
++				free, pa->pa_free);
+ 		/*
+ 		 * pa is already deleted so we use the value obtained
+ 		 * from the bitmap and continue.
+ 		 */
+ 	}
++	BUG_ON(pa->pa_free != free);
+ 	atomic_add(free, &sbi->s_mb_discarded);
+ 
+ 	return err;
+@@ -4575,6 +4587,25 @@
+ 			ac->ac_b_ex.fe_len = 0;
+ 			ar->len = 0;
+ 			ext4_mb_show_ac(ac);
++			if (ac->ac_pa) {
++				struct ext4_prealloc_space *pa = ac->ac_pa;
++
++				/* We can not make sure whether the bitmap has
++				 * been updated or not when fail case. So can
++				 * not revert pa_free back, just mark pa_error*/
++				pa->pa_error++;
++				ext4_error(sb,
++					"Updating bitmap error: [err %d] "
++					"[pa %p] [phy %lu] [logic %lu] "
++					"[len %u] [free %u] [error %u] "
++					"[inode %lu]", *errp, pa,
++					(unsigned long)pa->pa_pstart,
++					(unsigned long)pa->pa_lstart,
++					(unsigned)pa->pa_len,
++					(unsigned)pa->pa_free,
++					(unsigned)pa->pa_error,
++					pa->pa_inode ? pa->pa_inode->i_ino : 0);
++			}
+ 		} else {
+ 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
+ 			ar->len = ac->ac_b_ex.fe_len;
+Index: linux-source-2.6.32/fs/ext4/mballoc.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.h	2012-06-28 12:11:06.641723341 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.h	2012-06-28 12:11:12.525665254 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/version.h>
+ #include <linux/blkdev.h>
+ #include <linux/mutex.h>
++#include <linux/genhd.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+ 
+@@ -130,6 +131,7 @@
+ 	ext4_grpblk_t		pa_free;	/* how many blocks are free */
+ 	unsigned short		pa_type;	/* pa type. inode or group */
+ 	spinlock_t		*pa_obj_lock;
++	unsigned short		pa_error;
+ 	struct inode		*pa_inode;	/* hack, for history only */
+ };
+ 
diff --git a/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch
new file mode 100644
index 0000000..50f452e
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-misc-2.6.32-vanilla.patch
@@ -0,0 +1,257 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:19.989662510 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:23.325664479 +0200
+@@ -1182,6 +1182,9 @@
+ 
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+ 
++/* Has been moved to linux/magic.h but we need it for Lustre */
++#define EXT4_SUPER_MAGIC	0xEF53
++
+ /*
+  * Codes for operating systems
+  */
+@@ -1608,6 +1611,9 @@
+ extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
+ extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
+ 						ext4_group_t, int);
++extern void ext4_mb_discard_inode_preallocations(struct inode *);
++
++
+ /* inode.c */
+ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
+ 		struct buffer_head *bh, ext4_fsblk_t blocknr);
+Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:09:30.485668675 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:10:23.325664479 +0200
+@@ -58,6 +58,12 @@
+  */
+ #define EXT_STATS_
+ 
++/*
++ * define EXT4_ALLOC_NEEDED to 0 since block bitmap, group desc. and sb
++ * are now accounted in ext4_ext_calc_credits_for_insert()
++ */
++#define EXT4_ALLOC_NEEDED 0
++#define HAVE_EXT_PREPARE_CB_EXTENT
+ 
+ /*
+  * ext4_inode has i_block array (60 bytes total).
+@@ -160,6 +166,7 @@
+ #define EXT_INIT_MAX_LEN	(1UL << 15)
+ #define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
+ 
++#define EXT4_EXT_HAS_NO_TREE	/* ext4_extents_tree struct is not used*/
+ 
+ #define EXT_FIRST_EXTENT(__hdr__) \
+ 	((struct ext4_extent *) (((char *) (__hdr__)) +		\
+@@ -239,6 +246,8 @@
+ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+ 						   int num,
+ 						   struct ext4_ext_path *path);
++extern int ext4_ext_calc_credits_for_insert(struct inode *,
++					    struct ext4_ext_path *);
+ extern int ext4_can_extents_be_merged(struct inode *inode,
+ 				      struct ext4_extent *ex1,
+ 				      struct ext4_extent *ex2);
+Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.c	2012-06-28 12:08:27.121667674 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.c	2012-06-28 12:10:23.325664479 +0200
+@@ -31,6 +31,7 @@
+ 	}
+ 	return err;
+ }
++EXPORT_SYMBOL(__ext4_journal_get_write_access);
+ 
+ int __ext4_journal_forget(const char *where, handle_t *handle,
+ 				struct buffer_head *bh)
+@@ -107,3 +108,4 @@
+ 	}
+ 	return err;
+ }
++EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
+Index: linux-source-2.6.32/fs/ext4/ext4_jbd2.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_jbd2.h	2012-06-28 12:08:26.825663990 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_jbd2.h	2012-06-28 12:10:23.325664479 +0200
+@@ -35,6 +35,8 @@
+ 	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
+ 	 ? 27U : 8U)
+ 
++#define ext4_journal_dirty_metadata(handle, bh)  \
++		ext4_handle_dirty_metadata(handle, NULL, bh)
+ /* Extended attribute operations touch at most two data buffers,
+  * two bitmap buffers, and two group summaries, in addition to the inode
+  * and the superblock, which are already accounted for. */
+Index: linux-source-2.6.32/fs/ext4/extents.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:09:30.489673456 +0200
++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:10:23.329669991 +0200
+@@ -2030,6 +2030,55 @@
+ }
+ 
+ /*
++ * This routine returns the max. credits the extent tree can consume.
++ * It should be OK for low-performance paths like ->writepage().
++ * To allow many writing processes to fit into a single transaction,
++ * the caller should calculate credits under truncate_mutex and
++ * pass the actual path.
++ */
++int ext4_ext_calc_credits_for_insert(struct inode *inode,
++				     struct ext4_ext_path *path)
++{
++	int depth, needed;
++
++	if (path) {
++		/* probably there is space in leaf? */
++		depth = ext_depth(inode);
++		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
++				< le16_to_cpu(path[depth].p_hdr->eh_max))
++			return 1;
++	}
++
++	/*
++	 * given 32bit logical block (4294967296 blocks), max. tree
++	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
++	 * let's also add one more level for imbalance.
++	 */
++	depth = 5;
++
++	/* allocation of new data block(s) */
++	needed = 2;
++
++	/*
++	 * tree can be full, so it'd need to grow in depth:
++	 * we need one credit to modify old root, credits for
++	 * new root will be added in split accounting
++	 */
++	needed += 1;
++	/*
++	 * Index split can happen, we'd need:
++	 *    allocate intermediate indexes (bitmap + group)
++	 *  + change two blocks at each level, but root (already included)
++	 */
++	needed += (depth * 2) + (depth * 2);
++
++	/* any allocation modifies superblock */
++	needed += 1;
++
++	return needed;
++}
++
++/*
+  * How many index/leaf blocks need to change/allocate to modify nrblocks?
+  *
+  * if nrblocks are fit in a single extent (chunk flag is 1), then
+@@ -3890,3 +3939,14 @@
+ #endif
+ }
+ 
++EXPORT_SYMBOL(ext4_ext_store_pblock);
++EXPORT_SYMBOL(ext4_ext_search_right);
++EXPORT_SYMBOL(ext4_ext_search_left);
++EXPORT_SYMBOL(ext_pblock);
++EXPORT_SYMBOL(ext4_ext_insert_extent);
++EXPORT_SYMBOL(ext4_mb_new_blocks);
++EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
++EXPORT_SYMBOL(ext4_mark_inode_dirty);
++EXPORT_SYMBOL(ext4_ext_walk_space);
++EXPORT_SYMBOL(ext4_ext_find_extent);
++EXPORT_SYMBOL(ext4_ext_drop_refs);
+Index: linux-source-2.6.32/fs/ext4/inode.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:10:15.329671358 +0200
++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:10:23.333666208 +0200
+@@ -5062,6 +5062,7 @@
+ 	iget_failed(inode);
+ 	return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL(ext4_iget);
+ 
+ static int ext4_inode_blocks_set(handle_t *handle,
+ 				struct ext4_inode *raw_inode,
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:10:19.997669436 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:23.333666208 +0200
+@@ -4085,6 +4085,7 @@
+ 	if (ac)
+ 		kmem_cache_free(ext4_ac_cachep, ac);
+ }
++EXPORT_SYMBOL(ext4_discard_preallocations);
+ 
+ /*
+  * finds all preallocated spaces and return blocks being freed to them
+@@ -4879,3 +4880,6 @@
+ 		kmem_cache_free(ext4_ac_cachep, ac);
+ 	return;
+ }
++
++EXPORT_SYMBOL(ext4_free_blocks);
++
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:15.337667957 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:23.337667624 +0200
+@@ -127,6 +127,7 @@
+ 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+ 		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
+ }
++EXPORT_SYMBOL(ext4_itable_unused_count);
+ 
+ void ext4_block_bitmap_set(struct super_block *sb,
+ 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
+@@ -1101,10 +1102,12 @@
+ 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+ 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+ 	Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
++	Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
+ 	Opt_usrquota, Opt_grpquota, Opt_i_version,
+ 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
+ 	Opt_block_validity, Opt_noblock_validity,
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
++	Opt_mballoc,
+ 	Opt_discard, Opt_nodiscard,
+ };
+ 
+@@ -1156,6 +1159,9 @@
+ 	{Opt_noquota, "noquota"},
+ 	{Opt_quota, "quota"},
+ 	{Opt_usrquota, "usrquota"},
++	{Opt_iopen, "iopen"},
++	{Opt_noiopen, "noiopen"},
++	{Opt_iopen_nopriv, "iopen_nopriv"},
+ 	{Opt_barrier, "barrier=%u"},
+ 	{Opt_barrier, "barrier"},
+ 	{Opt_nobarrier, "nobarrier"},
+@@ -1171,6 +1177,7 @@
+ 	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
+ 	{Opt_auto_da_alloc, "auto_da_alloc"},
+ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
++	{Opt_mballoc, "mballoc"},
+ 	{Opt_discard, "discard"},
+ 	{Opt_nodiscard, "nodiscard"},
+ 	{Opt_err, NULL},
+@@ -1524,6 +1531,10 @@
+ 			else
+ 				clear_opt(sbi->s_mount_opt, BARRIER);
+ 			break;
++		case Opt_iopen:
++		case Opt_noiopen:
++		case Opt_iopen_nopriv:
++			break;
+ 		case Opt_ignore:
+ 			break;
+ 		case Opt_resize:
+@@ -1607,6 +1618,8 @@
+ 		case Opt_nodiscard:
+ 			clear_opt(sbi->s_mount_opt, DISCARD);
+ 			break;
++		case Opt_mballoc:
++			break;
+ 		default:
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "Unrecognized mount option \"%s\" "
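+
+The EXPORT_SYMBOL() additions scattered through this patch are what let the
+out-of-tree ldiskfs/Lustre OSD call into ext4 internals. A minimal sketch of
+the kind of call site this enables (hypothetical module code, not part of the
+patch):
+
+    /* look up an inode by number on a mounted ldiskfs superblock */
+    struct inode *inode = ext4_iget(sb, ino);
+
+    if (IS_ERR(inode))
+            return PTR_ERR(inode);
+    /* ... use it ... */
+    ext4_discard_preallocations(inode);   /* exported above */
+    iput(inode);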
diff --git a/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch
new file mode 100644
index 0000000..a7fca5f
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-mmp-2.6.32-vanilla.patch
@@ -0,0 +1,575 @@
+Prevent an ext4 filesystem from being mounted multiple times.
+A sequence number is stored on disk and is periodically updated (every 5
+seconds by default) by a mounted filesystem.
+At mount time, we now wait for s_mmp_update_interval seconds to make sure
+that the MMP sequence does not change.
+In case of failure, the nodename, bdevname and the time at which the MMP
+block was last updated are displayed.
+Move all mmp code to a dedicated file (mmp.c).
+
+Signed-off-by: Andreas Dilger <adilger <at> whamcloud.com>
+Signed-off-by: Johann Lombardi <johann <at> whamcloud.com>
+---
+ fs/ext4/Makefile |    3 +-
+ fs/ext4/ext4.h   |   76 ++++++++++++-
+ fs/ext4/mmp.c    |  351 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/ext4/super.c  |   18 +++-
+ 4 files changed, 444 insertions(+), 4 deletions(-)
+ create mode 100644 fs/ext4/mmp.c
+
+Index: linux-source-2.6.32/fs/ext4/Makefile
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:08:33.661737979 +0200
++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:09:59.685666701 +0200
+@@ -6,7 +6,8 @@
+ 
+ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+-		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
++		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
++		mmp.o
+ 
+ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
+ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:09:34.581668116 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:09:59.689673296 +0200
+@@ -962,7 +962,7 @@
+ 	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
+ 	__le32	s_flags;		/* Miscellaneous flags */
+ 	__le16  s_raid_stride;		/* RAID stride */
+-	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
++	__le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
+ 	__le64  s_mmp_block;            /* Block for multi-mount protection */
+ 	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
+ 	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
+@@ -1109,6 +1109,9 @@
+ 
+ 	/* workqueue for dio unwritten */
+ 	struct workqueue_struct *dio_unwritten_wq;
++
++	/* Kernel thread for multiple mount protection */
++	struct task_struct *s_mmp_tsk;
+ };
+ 
+ static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
+@@ -1248,7 +1251,8 @@
+ 					 EXT4_FEATURE_INCOMPAT_META_BG| \
+ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
+ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
+-					 EXT4_FEATURE_INCOMPAT_FLEX_BG)
++					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
++					 EXT4_FEATURE_INCOMPAT_MMP)
+ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+ 					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+@@ -1430,6 +1434,67 @@
+ extern struct proc_dir_entry *ext4_proc_root;
+ 
+ /*
++ * This structure will be used for multiple mount protection. It will be
++ * written into the block number saved in the s_mmp_block field in the
++ * superblock. Programs that check MMP should assume that if
++ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
++ * to use the filesystem, regardless of how old the timestamp is.
++ */
++#define EXT4_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
++#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
++#define EXT4_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
++#define EXT4_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
++
++struct mmp_struct {
++       __le32  mmp_magic;              /* Magic number for MMP */
++       __le32  mmp_seq;                /* Sequence no. updated periodically */
++
++       /*
++        * mmp_time, mmp_nodename & mmp_bdevname are only used for information
++        * purposes and do not affect the correctness of the algorithm
++        */
++       __le64  mmp_time;               /* Time last updated */
++       char    mmp_nodename[64];       /* Node which last updated MMP block */
++       char    mmp_bdevname[32];       /* Bdev which last updated MMP block */
++
++       /*
++        * mmp_check_interval is used to verify if the MMP block has been
++        * updated on the block device. The value is updated based on the
++        * maximum time to write the MMP block during an update cycle.
++        */
++       __le16  mmp_check_interval;
++
++       __le16  mmp_pad1;
++       __le32  mmp_pad2[227];
++};
++
++/* arguments passed to the mmp thread */
++struct mmpd_data {
++       struct buffer_head *bh; /* bh from initial read_mmp_block() */
++       struct super_block *sb;  /* super block of the fs */
++};
++
++/*
++ * Check interval multiplier
++ * The MMP block is written every update interval and initially checked every
++ * update interval x the multiplier (the value is then adapted based on the
++ * write latency). The reason is that writes can be delayed under load and we
++ * don't want readers to incorrectly assume that the filesystem is no longer
++ * in use.
++ */
++#define EXT4_MMP_CHECK_MULT            2UL
++
++/*
++ * Minimum interval for MMP checking in seconds.
++ */
++#define EXT4_MMP_MIN_CHECK_INTERVAL    5UL
++
++/*
++ * Maximum interval for MMP checking in seconds.
++ */
++#define EXT4_MMP_MAX_CHECK_INTERVAL    300UL
++
++/*
+  * Function prototypes
+  */
+ 
+@@ -1594,6 +1659,10 @@
+ 	__attribute__ ((format (printf, 3, 4)));
+ extern void ext4_msg(struct super_block *, const char *, const char *, ...)
+ 	__attribute__ ((format (printf, 3, 4)));
++extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
++			   const char *, const char *);
++#define dump_mmp_msg(sb, mmp, msg)     __dump_mmp_msg(sb, mmp, __func__, \
++                                                      msg)
+ extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
+ 				const char *, const char *, ...)
+ 	__attribute__ ((format (printf, 4, 5)));
+@@ -1876,6 +1945,8 @@
+ 			     __u64 start_orig, __u64 start_donor,
+ 			     __u64 len, __u64 *moved_len);
+ 
++/* mmp.c */
++extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+ 
+ /*
+  * Add new method to test wether block and inode bitmaps are properly
+Index: linux-source-2.6.32/fs/ext4/mmp.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/fs/ext4/mmp.c	2012-06-28 12:09:59.689673296 +0200
+@@ -0,0 +1,351 @@
++#include <linux/fs.h>
++#include <linux/random.h>
++#include <linux/buffer_head.h>
++#include <linux/utsname.h>
++#include <linux/kthread.h>
++
++#include "ext4.h"
++
++/*
++ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
++ * faster.
++ */
++static int write_mmp_block(struct buffer_head *bh)
++{
++       mark_buffer_dirty(bh);
++       lock_buffer(bh);
++       bh->b_end_io = end_buffer_write_sync;
++       get_bh(bh);
++       submit_bh(WRITE_SYNC, bh);
++       wait_on_buffer(bh);
++       if (unlikely(!buffer_uptodate(bh)))
++               return 1;
++
++       return 0;
++}
++
++/*
++ * Read the MMP block. It _must_ be read from disk and hence we clear the
++ * uptodate flag on the buffer.
++ */
++static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
++                         ext4_fsblk_t mmp_block)
++{
++       struct mmp_struct *mmp;
++
++       if (*bh)
++               clear_buffer_uptodate(*bh);
++
++       /* This would be sb_bread(sb, mmp_block), except we need to be sure
++        * that the MD RAID device cache has been bypassed, and that the read
++        * is not blocked in the elevator. */
++       if (!*bh)
++               *bh = sb_getblk(sb, mmp_block);
++       if (*bh) {
++               get_bh(*bh);
++               lock_buffer(*bh);
++               (*bh)->b_end_io = end_buffer_read_sync;
++               submit_bh(READ_SYNC, *bh);
++               wait_on_buffer(*bh);
++               if (!buffer_uptodate(*bh)) {
++                       brelse(*bh);
++                       *bh = NULL;
++               }
++       }
++       if (!*bh) {
++               ext4_warning(sb, "Error while reading MMP block %llu",
++                            mmp_block);
++               return -EIO;
++       }
++
++       mmp = (struct mmp_struct *)((*bh)->b_data);
++       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
++               return -EINVAL;
++
++       return 0;
++}
++
++/*
++ * Dump as much information as possible to help the admin.
++ */
++void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
++                   const char *function, const char *msg)
++{
++       __ext4_warning(sb, function, msg);
++       __ext4_warning(sb, function,
++                      "MMP failure info: last update time: %llu, last update "
++                      "node: %s, last update device: %s\n",
++                      (long long unsigned int) le64_to_cpu(mmp->mmp_time),
++                      mmp->mmp_nodename, mmp->mmp_bdevname);
++}
++
++/*
++ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
++ */
++static int kmmpd(void *data)
++{
++       struct super_block *sb = ((struct mmpd_data *) data)->sb;
++       struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
++       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
++       struct mmp_struct *mmp;
++       ext4_fsblk_t mmp_block;
++       u32 seq = 0;
++       unsigned long failed_writes = 0;
++       int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
++       unsigned mmp_check_interval;
++       unsigned long last_update_time;
++       unsigned long diff;
++       int retval;
++
++       mmp_block = le64_to_cpu(es->s_mmp_block);
++       mmp = (struct mmp_struct *)(bh->b_data);
++       mmp->mmp_time = cpu_to_le64(get_seconds());
++       /*
++        * Start with the higher mmp_check_interval and reduce it if
++        * the MMP block is being updated on time.
++        */
++       mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
++                                EXT4_MMP_MIN_CHECK_INTERVAL);
++       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
++       bdevname(bh->b_bdev, mmp->mmp_bdevname);
++
++       memcpy(mmp->mmp_nodename, init_utsname()->nodename,
++              sizeof(mmp->mmp_nodename));
++
++       while (!kthread_should_stop()) {
++               if (++seq > EXT4_MMP_SEQ_MAX)
++                       seq = 1;
++
++               mmp->mmp_seq = cpu_to_le32(seq);
++               mmp->mmp_time = cpu_to_le64(get_seconds());
++               last_update_time = jiffies;
++
++               retval = write_mmp_block(bh);
++               /*
++                * Don't spew too many error messages. Print one every
++                * (s_mmp_update_interval * 60) seconds.
++                */
++               if (retval) {
++                       if ((failed_writes % 60) == 0)
++                               ext4_error(sb, "Error writing to MMP block");
++                       failed_writes++;
++               }
++
++               if (!(le32_to_cpu(es->s_feature_incompat) &
++                   EXT4_FEATURE_INCOMPAT_MMP)) {
++                       ext4_warning(sb, "kmmpd being stopped since MMP feature"
++                                    " has been disabled.");
++                       EXT4_SB(sb)->s_mmp_tsk = NULL;
++                       goto failed;
++               }
++
++               if (sb->s_flags & MS_RDONLY) {
++                       ext4_warning(sb, "kmmpd being stopped since filesystem "
++                                    "has been remounted as readonly.");
++                       EXT4_SB(sb)->s_mmp_tsk = NULL;
++                       goto failed;
++               }
++
++               diff = jiffies - last_update_time;
++               if (diff < mmp_update_interval * HZ)
++                       schedule_timeout_interruptible(mmp_update_interval *
++                                                      HZ - diff);
++
++               /*
++                * We need to make sure that more than mmp_check_interval
++                * seconds have not passed since writing. If that has happened
++                * we need to check if the MMP block is as we left it.
++                */
++               diff = jiffies - last_update_time;
++               if (diff > mmp_check_interval * HZ) {
++                       struct buffer_head *bh_check = NULL;
++                       struct mmp_struct *mmp_check;
++
++                       retval = read_mmp_block(sb, &bh_check, mmp_block);
++                       if (retval) {
++                               ext4_error(sb, "error reading MMP data: %d",
++                                          retval);
++
++                               EXT4_SB(sb)->s_mmp_tsk = NULL;
++                               goto failed;
++                       }
++
++                       mmp_check = (struct mmp_struct *)(bh_check->b_data);
++                       if (mmp->mmp_seq != mmp_check->mmp_seq ||
++                           memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
++                                  sizeof(mmp->mmp_nodename))) {
++                               dump_mmp_msg(sb, mmp_check,
++                                            "Error while updating MMP info. "
++                                            "The filesystem seems to have been"
++                                            " multiply mounted.");
++                               ext4_error(sb, "abort");
++                               goto failed;
++                       }
++                       put_bh(bh_check);
++               }
++
++                /*
++                * Adjust the mmp_check_interval depending on how much time
++                * it took for the MMP block to be written.
++                */
++               mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
++                                            EXT4_MMP_MAX_CHECK_INTERVAL),
++                                        EXT4_MMP_MIN_CHECK_INTERVAL);
++               mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
++       }
++
++       /*
++        * Unmount seems to be clean.
++        */
++       mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
++       mmp->mmp_time = cpu_to_le64(get_seconds());
++
++       retval = write_mmp_block(bh);
++
++failed:
++       kfree(data);
++       brelse(bh);
++       return retval;
++}
++
++/*
++ * Get a random new sequence number but make sure it is not greater than
++ * EXT4_MMP_SEQ_MAX.
++ */
++static unsigned int mmp_new_seq(void)
++{
++       u32 new_seq;
++
++       do {
++               get_random_bytes(&new_seq, sizeof(u32));
++       } while (new_seq > EXT4_MMP_SEQ_MAX);
++
++       return new_seq;
++}
++
++/*
++ * Protect the filesystem from being mounted more than once.
++ */
++int ext4_multi_mount_protect(struct super_block *sb,
++                                   ext4_fsblk_t mmp_block)
++{
++       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
++       struct buffer_head *bh = NULL;
++       struct mmp_struct *mmp = NULL;
++       struct mmpd_data *mmpd_data;
++       u32 seq;
++       unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
++       unsigned int wait_time = 0;
++       int retval;
++
++       if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
++           mmp_block >= ext4_blocks_count(es)) {
++               ext4_warning(sb, "Invalid MMP block in superblock");
++               goto failed;
++       }
++
++       retval = read_mmp_block(sb, &bh, mmp_block);
++       if (retval)
++               goto failed;
++
++       mmp = (struct mmp_struct *)(bh->b_data);
++
++       if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
++               mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
++
++       /*
++        * If check_interval in MMP block is larger, use that instead of
++        * update_interval from the superblock.
++        */
++       if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
++               mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);
++
++       seq = le32_to_cpu(mmp->mmp_seq);
++       if (seq == EXT4_MMP_SEQ_CLEAN)
++               goto skip;
++
++       if (seq == EXT4_MMP_SEQ_FSCK) {
++               dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
++               goto failed;
++       }
++
++       wait_time = min(mmp_check_interval * 2 + 1,
++                       mmp_check_interval + 60);
++
++       /* Print MMP interval if more than 20 secs. */
++       if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
++               ext4_warning(sb, "MMP interval %u higher than expected, please"
++                            " wait.\n", wait_time * 2);
++
++       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
++               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
++               goto failed;
++       }
++
++       retval = read_mmp_block(sb, &bh, mmp_block);
++       if (retval)
++               goto failed;
++       mmp = (struct mmp_struct *)(bh->b_data);
++       if (seq != le32_to_cpu(mmp->mmp_seq)) {
++               dump_mmp_msg(sb, mmp,
++                            "Device is already active on another node.");
++               goto failed;
++       }
++
++skip:
++       /*
++        * write a new random sequence number.
++        */
++       mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
++
++       retval = write_mmp_block(bh);
++       if (retval)
++               goto failed;
++
++       /*
++        * wait for MMP interval and check mmp_seq.
++        */
++       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
++               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
++               goto failed;
++       }
++
++       retval = read_mmp_block(sb, &bh, mmp_block);
++       if (retval)
++               goto failed;
++       mmp = (struct mmp_struct *)(bh->b_data);
++       if (seq != le32_to_cpu(mmp->mmp_seq)) {
++               dump_mmp_msg(sb, mmp,
++                            "Device is already active on another node.");
++               goto failed;
++       }
++
++       mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
++       if (!mmpd_data) {
++               ext4_warning(sb, "not enough memory for mmpd_data");
++               goto failed;
++       }
++       mmpd_data->sb = sb;
++       mmpd_data->bh = bh;
++
++       /*
++        * Start a kernel thread to update the MMP block periodically.
++        */
++       EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
++                                            bdevname(bh->b_bdev,
++                                                     mmp->mmp_bdevname));
++       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
++               EXT4_SB(sb)->s_mmp_tsk = NULL;
++               kfree(mmpd_data);
++               ext4_warning(sb, "Unable to create kmmpd thread for %s.",
++                            sb->s_id);
++               goto failed;
++       }
++
++       return 0;
++
++failed:
++       brelse(bh);
++       return 1;
++}
++
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:23.393677834 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:59.693721980 +0200
+@@ -40,6 +40,8 @@
+ #include <linux/log2.h>
+ #include <linux/crc16.h>
+ #include <asm/uaccess.h>
++#include <linux/kthread.h>
++#include <linux/utsname.h>
+ 
+ #include "ext4.h"
+ #include "ext4_jbd2.h"
+@@ -665,6 +667,8 @@
+ 		invalidate_bdev(sbi->journal_bdev);
+ 		ext4_blkdev_remove(sbi);
+ 	}
++	if (sbi->s_mmp_tsk)
++		kthread_stop(sbi->s_mmp_tsk);
+ 	sb->s_fs_info = NULL;
+ 	/*
+ 	 * Now that we are completely done shutting down the
+@@ -2731,6 +2735,10 @@
+ 	needs_recovery = (es->s_last_orphan != 0 ||
+ 			  EXT4_HAS_INCOMPAT_FEATURE(sb,
+ 				    EXT4_FEATURE_INCOMPAT_RECOVER));
++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
++	    !(sb->s_flags & MS_RDONLY))
++		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
++			goto failed_mount3;
+ 
+ 	/*
+ 	 * The first inode we look at is the journal inode.  Don't try
+@@ -2981,6 +2989,8 @@
+ 		else
+ 			kfree(sbi->s_flex_groups);
+ 	}
++	if (sbi->s_mmp_tsk)
++		kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
+ 	for (i = 0; i < db_count; i++)
+ 		brelse(sbi->s_group_desc[i]);
+@@ -3489,7 +3499,7 @@
+ 	struct ext4_mount_options old_opts;
+ 	ext4_group_t g;
+ 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+-	int err;
++	int err = 0;
+ #ifdef CONFIG_QUOTA
+ 	int i;
+ #endif
+@@ -3611,6 +3621,13 @@
+ 				goto restore_opts;
+ 			if (!ext4_setup_super(sb, es, 0))
+ 				sb->s_flags &= ~MS_RDONLY;
++			if (EXT4_HAS_INCOMPAT_FEATURE(sb,
++						    EXT4_FEATURE_INCOMPAT_MMP))
++				if (ext4_multi_mount_protect(sb,
++						le64_to_cpu(es->s_mmp_block))) {
++					err = -EROFS;
++					goto restore_opts;
++				}
+ 		}
+ 	}
+ 	ext4_setup_system_zone(sb);
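+
+To make the mount-time cost concrete (numbers derived from the constants
+above, with the 5-second default update interval from the description):
+kmmpd advertises mmp_check_interval = max(2 * 5, 5) = 10, so
+ext4_multi_mount_protect() waits min(2 * 10 + 1, 10 + 60) = 21 seconds, once
+before and once after writing its own random sequence - roughly 42 seconds
+before a mount of a previously active device proceeds. As written, a cleanly
+unmounted device (EXT4_MMP_SEQ_CLEAN) jumps to the skip label while wait_time
+is still 0, so it pays no delay at all.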
diff --git a/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch
new file mode 100644
index 0000000..07d4721
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-nlink-2.6.32-vanilla.patch
@@ -0,0 +1,16 @@
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:08:58.217663806 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:09:27.373665875 +0200
+@@ -1742,9 +1742,8 @@
+  */
+ static void ext4_dec_count(handle_t *handle, struct inode *inode)
+ {
+-	drop_nlink(inode);
+-	if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
+-		inc_nlink(inode);
++	if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
++		drop_nlink(inode);
+ }
+ 
+ 
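+
+A short note on the behavioural difference, since it is easy to miss: with the
+DIR_NLINK convention, a directory holding more subdirectories than
+EXT4_LINK_MAX keeps i_nlink pinned at 1 ("count unknown"). The old helper
+transiently drove such a count from 1 to 0 and back; the rewritten test simply
+never drops it:
+
+    /* dir with > EXT4_LINK_MAX subdirs, so i_nlink == 1: removing one
+     * subdir used to do drop_nlink() -> 0 then inc_nlink() -> 1;
+     * the new ext4_dec_count() leaves i_nlink == 1 untouched. */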
diff --git a/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch
new file mode 100644
index 0000000..fa66ccd
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-nocmtime-2.6.32-vanilla.patch
@@ -0,0 +1,27 @@
+Index: linux-source-2.6.32/fs/ext4/xattr.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:11:24.117664822 +0200
++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:11:31.053665768 +0200
+@@ -1347,7 +1347,7 @@
+ 	}
+ 	if (!error) {
+ 		ext4_xattr_update_super_block(handle, inode->i_sb);
+-		if (!(flags & XATTR_NO_CTIME))
++		if (!IS_NOCMTIME(inode) && !(flags & XATTR_NO_CTIME))
+ 			inode->i_ctime = ext4_current_time(inode);
+ 		if (!value)
+ 			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:11:16.365664904 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:31.057668333 +0200
+@@ -1444,7 +1444,8 @@
+ 	 * happen is that the times are slightly out of date
+ 	 * and/or different from the directory change time.
+ 	 */
+-	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
++	if (!IS_NOCMTIME(dir))
++		dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ 	ext4_update_dx_flag(dir);
+ 	dir->i_version++;
+ 	ext4_mark_inode_dirty(handle, dir);
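+
+IS_NOCMTIME() keys off the S_NOCMTIME inode flag, so with the two hunks above
+a caller that manages timestamps itself (the Lustre MDS is presumably the
+intended user) can opt an inode out of ext4's automatic ctime/mtime updates.
+A minimal sketch of such a caller, hypothetical rather than from the patch:
+
+    inode->i_flags |= S_NOCMTIME;   /* ext4 now leaves i_ctime/i_mtime
+                                     * alone on xattr and dirent updates */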
diff --git a/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch
new file mode 100644
index 0000000..7d1461a
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-osd-iam-exports-2.6.32-vanilla.patch
@@ -0,0 +1,68 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:39.449662782 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:42.265665313 +0200
+@@ -1684,6 +1684,9 @@
+ #define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
+ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+ 			       struct inode *inode);
++extern struct buffer_head *ext4_append(handle_t *handle,
++				       struct inode *inode,
++				       ext4_lblk_t *block, int *err);
+ 
+ /* resize.c */
+ extern int ext4_group_add(struct super_block *sb,
+Index: linux-source-2.6.32/fs/ext4/hash.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/hash.c	2012-06-28 12:08:21.745667214 +0200
++++ linux-source-2.6.32/fs/ext4/hash.c	2012-06-28 12:10:42.265665313 +0200
+@@ -9,6 +9,7 @@
+  * License.
+  */
+ 
++#include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/jbd2.h>
+ #include <linux/cryptohash.h>
+@@ -206,3 +207,4 @@
+ 	hinfo->minor_hash = minor_hash;
+ 	return 0;
+ }
++EXPORT_SYMBOL(ext4fs_dirhash);
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:39.453664850 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:42.283981062 +0200
+@@ -49,9 +49,9 @@
+ #define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+ #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
+ 
+-static struct buffer_head *ext4_append(handle_t *handle,
+-					struct inode *inode,
+-					ext4_lblk_t *block, int *err)
++struct buffer_head *ext4_append(handle_t *handle,
++				struct inode *inode,
++				ext4_lblk_t *block, int *err)
+ {
+ 	struct buffer_head *bh;
+ 	struct ext4_inode_info *ei = EXT4_I(inode);
+@@ -76,6 +76,7 @@
+ 	up(&ei->i_append_sem);
+ 	return bh;
+ }
++EXPORT_SYMBOL(ext4_append);
+ 
+ #ifndef assert
+ #define assert(test) J_ASSERT(test)
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:36.377665203 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:42.285668861 +0200
+@@ -412,6 +412,7 @@
+ 
+ 	ext4_handle_error(sb);
+ }
++EXPORT_SYMBOL(__ext4_std_error);
+ 
+ /*
+  * ext4_abort is a much stronger failure handler than ext4_error.  The
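+
+With ext4_append() un-staticked and exported above, an external index
+implementation (the "osd-iam" consumer the patch name refers to) can grow a
+directory-style file by one block inside its own transaction. A minimal
+sketch under that assumption:
+
+    ext4_lblk_t block;
+    int err = 0;
+    struct buffer_head *bh;
+
+    /* allocate the next logical block of @inode under @handle */
+    bh = ext4_append(handle, inode, &block, &err);
+    if (bh == NULL)
+            return err;
+    /* fill bh->b_data, dirty it through the journal, then brelse(bh) */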
diff --git a/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch
new file mode 100644
index 0000000..3eabdc3
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-osd-iop-common-2.6.32-vanilla.patch
@@ -0,0 +1,229 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:36.373664428 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:39.449662782 +0200
+@@ -1671,6 +1671,19 @@
+ extern int ext4_orphan_del(handle_t *, struct inode *);
+ extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+ 				__u32 start_minor_hash, __u32 *next_hash);
++extern struct inode *ext4_create_inode(handle_t *handle,
++				       struct inode * dir, int mode);
++extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++			  struct inode *inode);
++extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
++			     struct ext4_dir_entry_2 * de_del,
++			     struct buffer_head * bh);
++extern struct buffer_head * ext4_find_entry(struct inode *dir,
++					    const struct qstr *d_name,
++					    struct ext4_dir_entry_2 ** res_dir);
++#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
++extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++			       struct inode *inode);
+ 
+ /* resize.c */
+ extern int ext4_group_add(struct super_block *sb,
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:36.377665203 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:39.453664850 +0200
+@@ -24,6 +24,7 @@
+  *	Theodore Ts'o, 2002
+  */
+ 
++#include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
+ #include <linux/jbd2.h>
+@@ -901,9 +902,9 @@
+  * The returned buffer_head has ->b_count elevated.  The caller is expected
+  * to brelse() it when appropriate.
+  */
+-static struct buffer_head * ext4_find_entry (struct inode *dir,
+-					const struct qstr *d_name,
+-					struct ext4_dir_entry_2 ** res_dir)
++struct buffer_head * ext4_find_entry(struct inode *dir,
++				      const struct qstr *d_name,
++				      struct ext4_dir_entry_2 ** res_dir)
+ {
+ 	struct super_block *sb;
+ 	struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -1010,6 +1011,7 @@
+ 		brelse(bh_use[ra_ptr]);
+ 	return ret;
+ }
++EXPORT_SYMBOL(ext4_find_entry);
+ 
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
+ 		       struct ext4_dir_entry_2 **res_dir, int *err)
+@@ -1533,8 +1535,8 @@
+  * may not sleep between calling this and putting something into
+  * the entry, as someone else might have used it while you slept.
+  */
+-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-			  struct inode *inode)
++int ext4_add_entry(handle_t *handle, struct dentry *dentry,
++		   struct inode *inode)
+ {
+ 	struct inode *dir = dentry->d_parent->d_inode;
+ 	struct buffer_head *bh;
+@@ -1585,6 +1587,7 @@
+ 		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+ 	return retval;
+ }
++EXPORT_SYMBOL(ext4_add_entry);
+ 
+ /*
+  * Returns 0 for success, or a negative error value
+@@ -1725,10 +1728,10 @@
+  * ext4_delete_entry deletes a directory entry by merging it with the
+  * previous entry
+  */
+-static int ext4_delete_entry(handle_t *handle,
+-			     struct inode *dir,
+-			     struct ext4_dir_entry_2 *de_del,
+-			     struct buffer_head *bh)
++int ext4_delete_entry(handle_t *handle,
++		      struct inode *dir,
++		      struct ext4_dir_entry_2 *de_del,
++		      struct buffer_head *bh)
+ {
+ 	struct ext4_dir_entry_2 *de, *pde;
+ 	unsigned int blocksize = dir->i_sb->s_blocksize;
+@@ -1763,7 +1766,7 @@
+ 	}
+ 	return -ENOENT;
+ }
+-
++EXPORT_SYMBOL(ext4_delete_entry);
+ /*
+  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+  * since this indicates that nlinks count was previously 1.
+@@ -1827,6 +1830,27 @@
+ 	return inum;
+ }
+ 
++struct inode * ext4_create_inode(handle_t *handle, struct inode * dir, int mode)
++{
++	struct inode *inode;
++
++	inode = ext4_new_inode(handle, dir, mode, 0, EXT4_SB(dir->i_sb)->s_inode_goal);
++	if (!IS_ERR(inode)) {
++		if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode)) {
++#ifdef CONFIG_LDISKFS_FS_XATTR
++			inode->i_op = &ext4_special_inode_operations;
++#endif
++		} else {
++			inode->i_op = &ext4_file_inode_operations;
++			inode->i_fop = &ext4_file_operations;
++			ext4_set_aops(inode);
++		}
++		unlock_new_inode(inode);
++	}
++	return inode;
++}
++EXPORT_SYMBOL(ext4_create_inode);
++
+ /*
+  * By the time this is called, we already have created
+  * the directory cache entry for the new file, but it
+@@ -1903,40 +1927,33 @@
+ 	return err;
+ }
+ 
+-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++/* Initialize @inode as a subdirectory of @dir, and add the
++ * "." and ".." entries into the first directory block. */
++int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
++			struct inode *inode)
+ {
+-	handle_t *handle;
+-	struct inode *inode;
+-	struct buffer_head *dir_block;
+-	struct ext4_dir_entry_2 *de;
++	struct buffer_head * dir_block;
++	struct ext4_dir_entry_2 * de;
+ 	unsigned int blocksize = dir->i_sb->s_blocksize;
+-	int err, retries = 0;
+-
+-	if (EXT4_DIR_LINK_MAX(dir))
+-		return -EMLINK;
++	int err = 0;
+ 
+-retry:
+-	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+-					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+-					EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+ 
+ 	if (IS_DIRSYNC(dir))
+ 		ext4_handle_sync(handle);
+ 
+-	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
+-			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+-	err = PTR_ERR(inode);
+-	if (IS_ERR(inode))
+-		goto out_stop;
+ 
+ 	inode->i_op = &ext4_dir_inode_operations;
+ 	inode->i_fop = &ext4_dir_operations;
+ 	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ 	dir_block = ext4_bread(handle, inode, 0, 1, &err);
+-	if (!dir_block)
+-		goto out_clear_inode;
++	if (!dir_block) {
++		clear_nlink(inode);
++		ext4_mark_inode_dirty(handle, inode);
++		iput (inode);
++		goto get_out;
++	}
+ 	BUFFER_TRACE(dir_block, "get_write_access");
+ 	ext4_journal_get_write_access(handle, dir_block);
+ 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
+@@ -1958,9 +1975,45 @@
+ 	ext4_handle_dirty_metadata(handle, dir, dir_block);
+ 	brelse(dir_block);
+ 	ext4_mark_inode_dirty(handle, inode);
++get_out:
++	return err;
++}
++EXPORT_SYMBOL(ext4_add_dot_dotdot);
++
++
++static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++	handle_t *handle;
++	struct inode *inode;
++	int err, retries = 0;
++
++	if (EXT4_DIR_LINK_MAX(dir))
++		return -EMLINK;
++
++retry:
++	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
++					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
++					2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	if (IS_DIRSYNC(dir))
++		handle->h_sync = 1;
++
++	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
++	err = PTR_ERR(inode);
++	if (IS_ERR(inode))
++		goto out_stop;
++
++	err = ext4_add_dot_dotdot(handle, dir, inode);
++	if (err) {
++		unlock_new_inode(inode);
++		goto out_stop;
++	}
++
+ 	err = ext4_add_entry(handle, dentry, inode);
+ 	if (err) {
+-out_clear_inode:
+ 		clear_nlink(inode);
+ 		unlock_new_inode(inode);
+ 		ext4_mark_inode_dirty(handle, inode);
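
[The functions exported by this patch form a small directory-manipulation API
for an out-of-tree consumer (the Lustre OSD). A rough sketch of how a caller
might combine them; the function name and journal credit count are
illustrative only, not taken from the patchset:

    static int osd_mkfile(struct inode *dir, struct dentry *dentry, int mode)
    {
            handle_t *handle;
            struct inode *inode;
            int err;

            handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            inode = ext4_create_inode(handle, dir, mode);
            if (IS_ERR(inode)) {
                    err = PTR_ERR(inode);
                    goto out;
            }
            err = ext4_add_entry(handle, dentry, inode);
            if (err == 0)
                    d_instantiate(dentry, inode);
            else
                    iput(inode);
    out:
            ext4_journal_stop(handle);
            return err;
    }
]
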
diff --git a/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch
new file mode 100644
index 0000000..f97493a
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-pdir-fix-2.6.32-vanilla.patch
@@ -0,0 +1,62 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:30.009672059 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:36.373664428 +0200
+@@ -16,6 +16,7 @@
+ #ifndef _EXT4_H
+ #define _EXT4_H
+ 
++#include <linux/dynlocks.h>
+ #include <linux/types.h>
+ #include <linux/blkdev.h>
+ #include <linux/magic.h>
+@@ -700,6 +701,10 @@
+ 	__u32	i_dtime;
+ 	ext4_fsblk_t	i_file_acl;
+ 
++	/* following fields for parallel directory operations -bzzz */
++	struct dynlock   i_htree_lock;
++	struct semaphore i_append_sem;
++
+ 	/*
+ 	 * i_block_group is the number of the block group which contains
+ 	 * this file's inode.  Constant across the lifetime of the inode,
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:30.005662279 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:36.377665203 +0200
+@@ -53,6 +53,11 @@
+ 					ext4_lblk_t *block, int *err)
+ {
+ 	struct buffer_head *bh;
++	struct ext4_inode_info *ei = EXT4_I(inode);
++
++	/* with parallel dir operations all appends
++	 * have to be serialized -bzzz */
++	down(&ei->i_append_sem);
+ 
+ 	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+ 
+@@ -65,7 +70,9 @@
+ 			brelse(bh);
+ 			bh = NULL;
+ 		}
++		ei->i_disksize = inode->i_size;
+ 	}
++	up(&ei->i_append_sem);
+ 	return bh;
+ }
+ 
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:32.945663837 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:36.377665203 +0200
+@@ -702,6 +702,8 @@
+ 
+ 	ei->vfs_inode.i_version = 1;
+ 	ei->vfs_inode.i_data.writeback_index = 0;
++	dynlock_init(&ei->i_htree_lock);
++	sema_init(&ei->i_append_sem, 1);
+ 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
+ 	INIT_LIST_HEAD(&ei->i_prealloc_list);
+ 	spin_lock_init(&ei->i_prealloc_lock);
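
[The new i_append_sem serializes directory growth: every path that appends a
block takes the semaphore around the allocation and the i_disksize update. A
minimal sketch of the pattern, assuming the fields added above; the helper
name is hypothetical:

    static void grow_dir_serialized(struct inode *inode)
    {
            struct ext4_inode_info *ei = EXT4_I(inode);

            /* with parallel directory operations, all appends must be
             * serialized; only one thread may extend the file at a time */
            down(&ei->i_append_sem);
            /* ... allocate the new directory block via ext4_bread() ... */
            ei->i_disksize = inode->i_size;
            up(&ei->i_append_sem);
    }
]
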
diff --git a/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch
new file mode 100644
index 0000000..6197d68
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-prealloc-2.6.32-vanilla.patch
@@ -0,0 +1,381 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:12.449674842 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:15.325667336 +0200
+@@ -1069,11 +1069,14 @@
+ 
+ 	/* tunables */
+ 	unsigned long s_stripe;
+-	unsigned int s_mb_stream_request;
++	unsigned long s_mb_small_req;
++	unsigned long s_mb_large_req;
+ 	unsigned int s_mb_max_to_scan;
+ 	unsigned int s_mb_min_to_scan;
+ 	unsigned int s_mb_stats;
+ 	unsigned int s_mb_order2_reqs;
++	unsigned long *s_mb_prealloc_table;
++	unsigned long s_mb_prealloc_table_size;
+ 	unsigned int s_mb_group_prealloc;
+ 	unsigned int s_max_writeback_mb_bump;
+ 	/* where last allocation was done - for stream allocation */
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:08:29.325663898 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:10:15.333660398 +0200
+@@ -1821,6 +1821,25 @@
+ 	ext4_mb_check_limits(ac, e4b, 1);
+ }
+ 
++static void ext4_mb_prealloc_table_add(struct ext4_sb_info *sbi, int value)
++{
++	int i;
++
++	if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
++		return;
++
++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
++		if (sbi->s_mb_prealloc_table[i] == 0) {
++			sbi->s_mb_prealloc_table[i] = value;
++			return;
++		}
++
++		/* callers must add values in increasing order */
++		if (value <= sbi->s_mb_prealloc_table[i])
++			return;
++	}
++}
++
+ /*
+  * This is a special case for storages like raid5
+  * we try to find stripe-aligned chunks for stripe-size requests
+@@ -2216,6 +2235,80 @@
+ 	.show   = ext4_mb_seq_groups_show,
+ };
+ 
++#define EXT4_MB_PREALLOC_TABLE          "prealloc_table"
++
++static int ext4_mb_prealloc_table_proc_read(char *page, char **start, off_t off,
++					    int count, int *eof, void *data)
++{
++	struct ext4_sb_info *sbi = data;
++	int len = 0;
++	int i;
++
++	*eof = 1;
++	if (off != 0)
++		return 0;
++
++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++)
++		len += sprintf(page + len, "%ld ",
++			       sbi->s_mb_prealloc_table[i]);
++	len += sprintf(page + len, "\n");
++
++	*start = page;
++	return len;
++}
++
++static int ext4_mb_prealloc_table_proc_write(struct file *file,
++					     const char __user *buf,
++					     unsigned long cnt, void *data)
++{
++	struct ext4_sb_info *sbi = data;
++	unsigned long value;
++	unsigned long prev = 0;
++	char str[128];
++	char *cur;
++	char *end;
++	unsigned long *new_table;
++	int num = 0;
++	int i = 0;
++
++	if (cnt >= sizeof(str))
++		return -EINVAL;
++	if (copy_from_user(str, buf, cnt))
++		return -EFAULT;
++
++	num = 0;
++	cur = str;
++	end = str + cnt;
++	while (cur < end) {
++		while ((cur < end) && (*cur == ' ')) cur++;
++		value = simple_strtol(cur, &cur, 0);
++		if (value == 0)
++			break;
++		if (value <= prev)
++			return -EINVAL;
++		prev = value;
++		num++;
++	}
++
++	new_table = kmalloc(num * sizeof(*new_table), GFP_KERNEL);
++	if (new_table == NULL)
++		return -ENOMEM;
++	kfree(sbi->s_mb_prealloc_table);
++	memset(new_table, 0, num * sizeof(*new_table));
++	sbi->s_mb_prealloc_table = new_table;
++	sbi->s_mb_prealloc_table_size = num;
++	cur = str;
++	end = str + cnt;
++	while (cur < end && i < num) {
++		while ((cur < end) && (*cur == ' ')) cur++;
++		value = simple_strtol(cur, &cur, 0);
++		ext4_mb_prealloc_table_add(sbi, value);
++		i++;
++	}
++
++	return cnt;
++}
++
+ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
+ {
+ 	struct super_block *sb = PDE(inode)->data;
+@@ -2455,12 +2548,56 @@
+ 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+ 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+ 	sbi->s_mb_stats = MB_DEFAULT_STATS;
+-	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+ 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+-	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
++
++	if (sbi->s_stripe == 0) {
++		sbi->s_mb_prealloc_table_size = 10;
++		i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++		sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++		if (sbi->s_mb_prealloc_table == NULL) {
++			kfree(sbi->s_mb_offsets);
++			kfree(sbi->s_mb_maxs);
++			return -ENOMEM;
++		}
++		memset(sbi->s_mb_prealloc_table, 0, i);
++
++		ext4_mb_prealloc_table_add(sbi, 4);
++		ext4_mb_prealloc_table_add(sbi, 8);
++		ext4_mb_prealloc_table_add(sbi, 16);
++		ext4_mb_prealloc_table_add(sbi, 32);
++		ext4_mb_prealloc_table_add(sbi, 64);
++		ext4_mb_prealloc_table_add(sbi, 128);
++		ext4_mb_prealloc_table_add(sbi, 256);
++		ext4_mb_prealloc_table_add(sbi, 512);
++		ext4_mb_prealloc_table_add(sbi, 1024);
++		ext4_mb_prealloc_table_add(sbi, 2048);
++
++		sbi->s_mb_small_req = 256;
++		sbi->s_mb_large_req = 1024;
++		sbi->s_mb_group_prealloc = 512;
++	} else {
++		sbi->s_mb_prealloc_table_size = 3;
++		i = sbi->s_mb_prealloc_table_size * sizeof(unsigned long);
++		sbi->s_mb_prealloc_table = kmalloc(i, GFP_NOFS);
++		if (sbi->s_mb_prealloc_table == NULL) {
++			kfree(sbi->s_mb_offsets);
++			kfree(sbi->s_mb_maxs);
++			return -ENOMEM;
++		}
++		memset(sbi->s_mb_prealloc_table, 0, i);
++
++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe);
++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 2);
++		ext4_mb_prealloc_table_add(sbi, sbi->s_stripe * 4);
++
++		sbi->s_mb_small_req = sbi->s_stripe;
++		sbi->s_mb_large_req = sbi->s_stripe * 8;
++		sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
++	}
+ 
+ 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
+ 	if (sbi->s_locality_groups == NULL) {
++		kfree(sbi->s_mb_prealloc_table);
+ 		kfree(sbi->s_mb_offsets);
+ 		kfree(sbi->s_mb_maxs);
+ 		return -ENOMEM;
+@@ -2474,9 +2611,18 @@
+ 		spin_lock_init(&lg->lg_prealloc_lock);
+ 	}
+ 
+-	if (sbi->s_proc)
++	if (sbi->s_proc) {
++		struct proc_dir_entry *p;
+ 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
+ 				 &ext4_mb_seq_groups_fops, sb);
++		p = create_proc_entry(EXT4_MB_PREALLOC_TABLE, S_IFREG |
++				      S_IRUGO | S_IWUSR, sbi->s_proc);
++		if (p) {
++			p->data = sbi;
++			p->read_proc = ext4_mb_prealloc_table_proc_read;
++			p->write_proc = ext4_mb_prealloc_table_proc_write;
++		}
++	}
+ 
+ 	if (sbi->s_journal)
+ 		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
+@@ -2556,8 +2702,10 @@
+ 	}
+ 
+ 	free_percpu(sbi->s_locality_groups);
+-	if (sbi->s_proc)
++	if (sbi->s_proc) {
+ 		remove_proc_entry("mb_groups", sbi->s_proc);
++		remove_proc_entry(EXT4_MB_PREALLOC_TABLE, sbi->s_proc);
++	}
+ 
+ 	return 0;
+ }
+@@ -2852,11 +3000,12 @@
+ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 				struct ext4_allocation_request *ar)
+ {
+-	int bsbits, max;
++	int bsbits, i, wind;
+ 	ext4_lblk_t end;
+-	loff_t size, orig_size, start_off;
++	loff_t size, orig_size;
+ 	ext4_lblk_t start, orig_start;
+ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
++	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ 	struct ext4_prealloc_space *pa;
+ 
+ 	/* do normalize only data requests, metadata requests
+@@ -2886,49 +3035,35 @@
+ 	size = size << bsbits;
+ 	if (size < i_size_read(ac->ac_inode))
+ 		size = i_size_read(ac->ac_inode);
++	size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
+ 
+-	/* max size of free chunks */
+-	max = 2 << bsbits;
++	start = wind = 0;
+ 
+-#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
+-		(req <= (size) || max <= (chunk_size))
++	/* let's choose preallocation window depending on file size */
++	for (i = 0; i < sbi->s_mb_prealloc_table_size; i++) {
++		if (size <= sbi->s_mb_prealloc_table[i]) {
++			wind = sbi->s_mb_prealloc_table[i];
++			break;
++		}
++	}
++	size = wind;
+ 
+-	/* first, try to predict filesize */
+-	/* XXX: should this table be tunable? */
+-	start_off = 0;
+-	if (size <= 16 * 1024) {
+-		size = 16 * 1024;
+-	} else if (size <= 32 * 1024) {
+-		size = 32 * 1024;
+-	} else if (size <= 64 * 1024) {
+-		size = 64 * 1024;
+-	} else if (size <= 128 * 1024) {
+-		size = 128 * 1024;
+-	} else if (size <= 256 * 1024) {
+-		size = 256 * 1024;
+-	} else if (size <= 512 * 1024) {
+-		size = 512 * 1024;
+-	} else if (size <= 1024 * 1024) {
+-		size = 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-						(21 - bsbits)) << 21;
+-		size = 2 * 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-							(22 - bsbits)) << 22;
+-		size = 4 * 1024 * 1024;
+-	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+-					(8<<20)>>bsbits, max, 8 * 1024)) {
+-		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-							(23 - bsbits)) << 23;
+-		size = 8 * 1024 * 1024;
+-	} else {
+-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
+-		size	  = ac->ac_o_ex.fe_len << bsbits;
++	if (wind == 0) {
++		__u64 tstart, tend;
++		/* file is quite large, so we now preallocate with
++		 * the biggest configured window with regard to the
++		 * logical offset */
++		wind = sbi->s_mb_prealloc_table[i - 1];
++		tstart = ac->ac_o_ex.fe_logical;
++		do_div(tstart, wind);
++		start = tstart * wind;
++		tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
++		do_div(tend, wind);
++		tend = tend * wind + wind;
++		size = tend - start;
+ 	}
+-	orig_size = size = size >> bsbits;
+-	orig_start = start = start_off >> bsbits;
++	orig_size = size;
++	orig_start = start;
+ 
+ 	/* don't cover already allocated blocks in selected range */
+ 	if (ar->pleft && start <= ar->lleft) {
+@@ -3000,7 +3135,6 @@
+ 	}
+ 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ 			start > ac->ac_o_ex.fe_logical);
+-	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+ 
+ 	/* now prepare goal request */
+ 
+@@ -3986,11 +4120,19 @@
+ 
+ 	/* don't use group allocation for large files */
+ 	size = max(size, isize);
+-	if (size > sbi->s_mb_stream_request) {
++	if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++	    (size >= sbi->s_mb_large_req)) {
+ 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+ 		return;
+ 	}
+ 
++	/*
++	 * the request is so large that we don't care about
++	 * streaming - it outweighs any possible seek
++	 */
++	if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
++		return;
++
+ 	BUG_ON(ac->ac_lg != NULL);
+ 	/*
+ 	 * locality group prealloc space are per cpu. The reason for having
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:10:06.381677398 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:10:15.337667957 +0200
+@@ -2244,7 +2244,8 @@
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+ 
+@@ -2259,7 +2260,8 @@
+ 	ATTR_LIST(mb_max_to_scan),
+ 	ATTR_LIST(mb_min_to_scan),
+ 	ATTR_LIST(mb_order2_req),
+-	ATTR_LIST(mb_stream_req),
++	ATTR_LIST(mb_small_req),
++	ATTR_LIST(mb_large_req),
+ 	ATTR_LIST(mb_group_prealloc),
+ 	ATTR_LIST(max_writeback_mb_bump),
+ 	NULL,
+Index: linux-source-2.6.32/fs/ext4/inode.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:09:34.579776667 +0200
++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:10:15.329671358 +0200
+@@ -2884,6 +2884,11 @@
+ 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
+ 		return -EROFS;
+ 
++	if (wbc->nr_to_write < sbi->s_mb_small_req) {
++		nr_to_writebump = sbi->s_mb_small_req - wbc->nr_to_write;
++		wbc->nr_to_write = sbi->s_mb_small_req;
++	}
++
+ 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ 		range_whole = 1;
+ 
diff --git a/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch
new file mode 100644
index 0000000..0c3aee0
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-print-inum-in-htree-warning-2.6.32-vanilla.patch
@@ -0,0 +1,15 @@
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:03.433668774 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:10:09.617666134 +0200
+@@ -395,8 +395,8 @@
+ 	    root->info.hash_version != DX_HASH_HALF_MD4 &&
+ 	    root->info.hash_version != DX_HASH_LEGACY) {
+ 		ext4_warning(dir->i_sb, __func__,
+-			     "Unrecognised inode hash code %d",
+-			     root->info.hash_version);
++			     "Unrecognised inode hash code %d for directory "
++			     "#%lu", root->info.hash_version, dir->i_ino);
+ 		brelse(bh);
+ 		*err = ERR_BAD_DX_DIR;
+ 		goto fail;
diff --git a/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch
new file mode 100644
index 0000000..204f59c
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-remove-cond_resched-calls-2.6.32-vanilla.patch
@@ -0,0 +1,29 @@
+Index: linux-source-2.6.32/fs/ext4/ialloc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ialloc.c	2012-06-28 12:09:19.417666703 +0200
++++ linux-source-2.6.32/fs/ext4/ialloc.c	2012-06-28 12:09:23.393677834 +0200
+@@ -1199,7 +1199,6 @@
+ 		if (!gdp)
+ 			continue;
+ 		desc_count += ext4_free_inodes_count(sb, gdp);
+-		cond_resched();
+ 	}
+ 	return desc_count;
+ #endif
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:09:19.413668742 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:09:23.393677834 +0200
+@@ -3680,11 +3680,9 @@
+ 		 * block group descriptors.  If the sparse superblocks
+ 		 * feature is turned on, then not all groups have this.
+ 		 */
+-		for (i = 0; i < ngroups; i++) {
++		for (i = 0; i < ngroups; i++)
+ 			overhead += ext4_bg_has_super(sb, i) +
+ 				ext4_bg_num_gdb(sb, i);
+-			cond_resched();
+-		}
+ 
+ 		/*
+ 		 * Every block group has an inode bitmap, a block
diff --git a/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch
new file mode 100644
index 0000000..3231e6c
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-store-tree-generation-at-find-2.6.32-vanilla.patch
@@ -0,0 +1,67 @@
+Index: linux-source-2.6.32/fs/ext4/ext4_extents.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4_extents.h	2012-06-28 12:10:23.325664479 +0200
++++ linux-source-2.6.32/fs/ext4/ext4_extents.h	2012-06-28 12:11:48.561664193 +0200
+@@ -113,6 +113,7 @@
+  * Truncate uses it to simulate recursive walking.
+  */
+ struct ext4_ext_path {
++	unsigned long			p_generation;
+ 	ext4_fsblk_t			p_block;
+ 	__u16				p_depth;
+ 	struct ext4_extent		*p_ext;
+Index: linux-source-2.6.32/fs/ext4/extents.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/extents.c	2012-06-28 12:10:54.881664295 +0200
++++ linux-source-2.6.32/fs/ext4/extents.c	2012-06-28 12:11:48.565668571 +0200
+@@ -1763,7 +1763,7 @@
+ {
+ 	struct ext4_ext_path *path = NULL;
+ 	struct ext4_ext_cache cbex;
+-	struct ext4_extent *ex;
++	struct ext4_extent _ex, *ex;
+ 	ext4_lblk_t next, start = 0, end = 0;
+ 	ext4_lblk_t last = block + num;
+ 	int depth, exists, err = 0;
+@@ -1776,17 +1776,29 @@
+ 		/* find extent for this block */
+ 		down_read(&EXT4_I(inode)->i_data_sem);
+ 		path = ext4_ext_find_extent(inode, block, path);
+-		up_read(&EXT4_I(inode)->i_data_sem);
+ 		if (IS_ERR(path)) {
++			up_read(&EXT4_I(inode)->i_data_sem);
+ 			err = PTR_ERR(path);
+ 			path = NULL;
+ 			break;
+ 		}
+ 
++		path[0].p_generation = EXT4_I(inode)->i_ext_generation;
++
+ 		depth = ext_depth(inode);
+-		BUG_ON(path[depth].p_hdr == NULL);
+-		ex = path[depth].p_ext;
++		if (unlikely(path[depth].p_hdr == NULL)) {
++			up_read(&EXT4_I(inode)->i_data_sem);
++			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
++			err = -EIO;
++			break;
++		}
++		ex = NULL;
++		if (path[depth].p_ext) {
++			_ex = *path[depth].p_ext;
++			ex = &_ex;
++		}
+ 		next = ext4_ext_next_allocated_block(path);
++		up_read(&EXT4_I(inode)->i_data_sem);
+ 
+ 		exists = 0;
+ 		if (!ex) {
+@@ -1836,7 +1848,7 @@
+ 		}
+ 
+ 		BUG_ON(cbex.ec_len == 0);
+-		err = func(inode, path, &cbex, ex, cbdata);
++		err = func(inode, path, &cbex, NULL, cbdata);
+ 		ext4_ext_drop_refs(path);
+ 
+ 		if (err < 0)
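
[The change above is the usual copy-out-under-lock idiom: path[depth].p_ext
points into tree data that is only stable while i_data_sem is held, so a stack
copy is taken before the lock is dropped (and the callback now receives NULL
for the raw pointer). Schematically, with a hypothetical helper:

    static int snapshot_extent(struct inode *inode, ext4_lblk_t block,
                               struct ext4_extent *out)
    {
            struct ext4_ext_path *path;
            int depth, found = 0;

            down_read(&EXT4_I(inode)->i_data_sem);
            path = ext4_ext_find_extent(inode, block, NULL);
            if (!IS_ERR(path)) {
                    depth = ext_depth(inode);
                    if (path[depth].p_ext) {
                            *out = *path[depth].p_ext; /* copy while locked */
                            found = 1;
                    }
                    ext4_ext_drop_refs(path);
                    kfree(path);
            }
            up_read(&EXT4_I(inode)->i_data_sem);
            return found;  /* *out stays valid; the tree may change now */
    }
]
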
diff --git a/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch
new file mode 100644
index 0000000..86c8818
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-vmalloc-2.6.32-vanilla.patch
@@ -0,0 +1,210 @@
+Index: linux-source-2.6.32/fs/ext4/super.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/super.c	2012-06-28 12:11:27.705663349 +0200
++++ linux-source-2.6.32/fs/ext4/super.c	2012-06-28 12:11:38.097666318 +0200
+@@ -639,7 +639,12 @@
+ 
+ 	for (i = 0; i < sbi->s_gdb_count; i++)
+ 		brelse(sbi->s_group_desc[i]);
+-	kfree(sbi->s_group_desc);
++
++	if (is_vmalloc_addr(sbi->s_group_desc))
++		vfree(sbi->s_group_desc);
++	else
++		kfree(sbi->s_group_desc);
++
+ 	if (is_vmalloc_addr(sbi->s_flex_groups))
+ 		vfree(sbi->s_flex_groups);
+ 	else
+@@ -2466,12 +2471,13 @@
+ 	unsigned long offset = 0;
+ 	unsigned long journal_devnum = 0;
+ 	unsigned long def_mount_opts;
+-	struct inode *root;
++	struct inode *root = NULL;
+ 	char *cp;
+ 	const char *descr;
+ 	int ret = -EINVAL;
+ 	int blocksize;
+ 	unsigned int db_count;
++	size_t size;
+ 	unsigned int i;
+ 	int needs_recovery, has_huge_files;
+ 	__u64 blocks_count;
+@@ -2794,11 +2800,18 @@
+ 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ 		   EXT4_DESC_PER_BLOCK(sb);
+-	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
+-				    GFP_KERNEL);
++	size = (size_t)db_count * sizeof(struct buffer_head *);
++	sbi->s_group_desc = kzalloc(size, GFP_KERNEL);
+ 	if (sbi->s_group_desc == NULL) {
+-		ext4_msg(sb, KERN_ERR, "not enough memory");
+-		goto failed_mount;
++		sbi->s_group_desc = vmalloc(size);
++		if (sbi->s_group_desc != NULL) {
++			memset(sbi->s_group_desc, 0, size);
++		} else {
++			ext4_msg(sb, KERN_ERR, "no memory for %u groups (%u)\n",
++				 sbi->s_groups_count, (unsigned int)size);
++			ret = -ENOMEM;
++			goto failed_mount;
++		}
+ 	}
+ 
+ #ifdef __BIG_ENDIAN
+@@ -3003,17 +3016,16 @@
+ 	if (IS_ERR(root)) {
+ 		ext4_msg(sb, KERN_ERR, "get root inode failed");
+ 		ret = PTR_ERR(root);
++		root = NULL;
+ 		goto failed_mount4;
+ 	}
+ 	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+-		iput(root);
+ 		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
+ 		goto failed_mount4;
+ 	}
+ 	sb->s_root = d_alloc_root(root);
+ 	if (!sb->s_root) {
+ 		ext4_msg(sb, KERN_ERR, "get root dentry failed");
+-		iput(root);
+ 		ret = -ENOMEM;
+ 		goto failed_mount4;
+ 	}
+@@ -3064,6 +3076,7 @@
+ 	if (err) {
+ 		ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
+ 			 err);
++		ret = err;
+ 		goto failed_mount4;
+ 	}
+ 
+@@ -3105,6 +3118,8 @@
+ 	goto failed_mount;
+ 
+ failed_mount4:
++	iput(root);
++	sb->s_root = NULL;
+ 	ext4_msg(sb, KERN_ERR, "mount failed");
+ 	destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
+ failed_mount_wq:
+@@ -3129,7 +3144,11 @@
+ failed_mount2:
+ 	for (i = 0; i < db_count; i++)
+ 		brelse(sbi->s_group_desc[i]);
+-	kfree(sbi->s_group_desc);
++
++	if (is_vmalloc_addr(sbi->s_group_desc))
++		vfree(sbi->s_group_desc);
++	else
++		kfree(sbi->s_group_desc);
+ failed_mount:
+ 	if (sbi->s_proc) {
+ 		remove_proc_entry(sb->s_id, ext4_proc_root);
+Index: linux-source-2.6.32/fs/ext4/mballoc.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/mballoc.c	2012-06-28 12:11:12.521665249 +0200
++++ linux-source-2.6.32/fs/ext4/mballoc.c	2012-06-28 12:11:38.101746741 +0200
+@@ -2469,24 +2469,37 @@
+ 	while (array_size < sizeof(*sbi->s_group_info) *
+ 	       num_meta_group_infos_max)
+ 		array_size = array_size << 1;
+-	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
+-	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
+-	 * So a two level scheme suffices for now. */
+-	sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
++	/* A 16TB filesystem with 64-bit pointers requires an 8192 byte
++	 * kmalloc(). Filesystems larger than 2^32 blocks (16TB normally)
++	 * have group descriptors at least twice as large (64 bytes or
++	 * more vs. 32 bytes for traditional ext3 filesystems), so a 128TB
++	 * filesystem needs a 128kB allocation, which may need vmalloc(). */
++	sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
+ 	if (sbi->s_group_info == NULL) {
+-		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
+-		return -ENOMEM;
++		sbi->s_group_info = vmalloc(array_size);
++		if (sbi->s_group_info != NULL) {
++			memset(sbi->s_group_info, 0, array_size);
++		} else {
++			ext4_msg(sb, KERN_ERR, "no memory for groupinfo (%u)\n",
++				 array_size);
++			return -ENOMEM;
++		}
+ 	}
+ 	sbi->s_buddy_cache = new_inode(sb);
+ 	if (sbi->s_buddy_cache == NULL) {
+-		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
++		ext4_msg(sb, KERN_ERR, "can't get new inode\n");
+ 		goto err_freesgi;
+ 	}
++	/* To avoid potentially colliding with a valid on-disk inode number,
++	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
++	 * not in the inode hash, so it should never be found by iget(), but
++	 * this will avoid confusion if it ever shows up during debugging. */
++	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
+ 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
+ 	for (i = 0; i < ngroups; i++) {
+ 		desc = ext4_get_group_desc(sb, i, NULL);
+ 		if (desc == NULL) {
+-			printk(KERN_ERR
++			ext4_msg(sb, KERN_ERR,
+ 				"EXT4-fs: can't read descriptor %u\n", i);
+ 			goto err_freebuddy;
+ 		}
+@@ -2504,7 +2517,10 @@
+ 		kfree(sbi->s_group_info[i]);
+ 	iput(sbi->s_buddy_cache);
+ err_freesgi:
+-	kfree(sbi->s_group_info);
++	if (is_vmalloc_addr(sbi->s_group_info))
++		vfree(sbi->s_group_info);
++	else
++		kfree(sbi->s_group_info);
+ 	return -ENOMEM;
+ }
+ 
+@@ -2545,14 +2561,6 @@
+ 		i++;
+ 	} while (i <= sb->s_blocksize_bits + 1);
+ 
+-	/* init file for buddy data */
+-	ret = ext4_mb_init_backend(sb);
+-	if (ret != 0) {
+-		kfree(sbi->s_mb_offsets);
+-		kfree(sbi->s_mb_maxs);
+-		return ret;
+-	}
+-
+ 	spin_lock_init(&sbi->s_md_lock);
+ 	spin_lock_init(&sbi->s_bal_lock);
+ 
+@@ -2622,6 +2630,15 @@
+ 		spin_lock_init(&lg->lg_prealloc_lock);
+ 	}
+ 
++	/* init file for buddy data */
++	ret = ext4_mb_init_backend(sb);
++	if (ret != 0) {
++		kfree(sbi->s_mb_prealloc_table);
++		kfree(sbi->s_mb_offsets);
++		kfree(sbi->s_mb_maxs);
++		return ret;
++	}
++
+ 	if (sbi->s_proc) {
+ 		struct proc_dir_entry *p;
+ 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
+@@ -2682,7 +2699,10 @@
+ 			EXT4_DESC_PER_BLOCK_BITS(sb);
+ 		for (i = 0; i < num_meta_group_infos; i++)
+ 			kfree(sbi->s_group_info[i]);
+-		kfree(sbi->s_group_info);
++		if (is_vmalloc_addr(sbi->s_group_info))
++			vfree(sbi->s_group_info);
++		else
++			kfree(sbi->s_group_info);
+ 	}
+ 	kfree(sbi->s_mb_offsets);
+ 	kfree(sbi->s_mb_maxs);
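
[Both allocation sites in this patch use the same kmalloc-first,
vmalloc-fallback idiom for arrays whose size scales with the number of block
groups. Factored out, the pattern looks like this (hypothetical helpers, not
part of the patch):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static void *big_zalloc(size_t size)
    {
            void *p = kzalloc(size, GFP_KERNEL);

            if (p == NULL) {
                    /* too large for the slab allocator; use vmalloc */
                    p = vmalloc(size);
                    if (p != NULL)
                            memset(p, 0, size);
            }
            return p;
    }

    static void big_free(const void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }

The matching free side must check is_vmalloc_addr(), exactly as the unload and
error paths above now do.]
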
diff --git a/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch
new file mode 100644
index 0000000..d9bf1d9
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-wantedi-2.6.32-vanilla.patch
@@ -0,0 +1,80 @@
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:08:39.705675504 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:08:58.217663806 +0200
+@@ -144,6 +144,17 @@
+ 	u16 size;
+ };
+ 
++/*
++ * dentry params used by ext4_dentry_goal() to pick the wanted inode number
++ */
++#define LVFS_DENTRY_PARAM_MAGIC		20070216UL
++struct lvfs_dentry_params
++{
++	unsigned long   ldp_inum;
++	unsigned long	ldp_flags;
++	u32		ldp_magic;
++};
++
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
+ static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
+ static inline unsigned dx_get_hash(struct dx_entry *entry);
+@@ -1753,6 +1764,19 @@
+ 	return err;
+ }
+ 
++static unsigned ext4_dentry_goal(struct super_block *sb, struct dentry *dentry)
++{
++	unsigned inum = EXT4_SB(sb)->s_inode_goal;
++
++	if (dentry->d_fsdata != NULL) {
++		struct lvfs_dentry_params *param = dentry->d_fsdata;
++
++		if (param->ldp_magic == LVFS_DENTRY_PARAM_MAGIC)
++			inum = param->ldp_inum;
++	}
++	return inum;
++}
++
+ /*
+  * By the time this is called, we already have created
+  * the directory cache entry for the new file, but it
+@@ -1778,7 +1802,8 @@
+ 	if (IS_DIRSYNC(dir))
+ 		ext4_handle_sync(handle);
+ 
+-	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
++	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
++				ext4_dentry_goal(dir->i_sb, dentry));
+ 	err = PTR_ERR(inode);
+ 	if (!IS_ERR(inode)) {
+ 		inode->i_op = &ext4_file_inode_operations;
+@@ -1812,7 +1837,8 @@
+ 	if (IS_DIRSYNC(dir))
+ 		ext4_handle_sync(handle);
+ 
+-	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
++	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name,
++				ext4_dentry_goal(dir->i_sb, dentry));
+ 	err = PTR_ERR(inode);
+ 	if (!IS_ERR(inode)) {
+ 		init_special_inode(inode, inode->i_mode, rdev);
+@@ -1850,7 +1876,7 @@
+ 		ext4_handle_sync(handle);
+ 
+ 	inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
+-			       &dentry->d_name, 0);
++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+ 	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+ 		goto out_stop;
+@@ -2271,7 +2297,7 @@
+ 		ext4_handle_sync(handle);
+ 
+ 	inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
+-			       &dentry->d_name, 0);
++			       &dentry->d_name, ext4_dentry_goal(dir->i_sb, dentry));
+ 	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+ 		goto out_stop;
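
[On the caller side (Lustre's lvfs layer), the wanted inode number travels
through dentry->d_fsdata before a normal VFS call; roughly as follows
(caller-side sketch, not part of this patch):

    struct lvfs_dentry_params param = {
            .ldp_inum  = wanted_ino,  /* inode number to try to allocate */
            .ldp_flags = 0,
            .ldp_magic = LVFS_DENTRY_PARAM_MAGIC,
    };
    int err;

    dentry->d_fsdata = &param;
    err = vfs_create(dir, dentry, mode, NULL);  /* reaches ext4_create() */
    dentry->d_fsdata = NULL;

If the magic does not match, ext4_dentry_goal() falls back to the superblock's
s_inode_goal, so unrelated users of d_fsdata are unaffected.]
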
diff --git a/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch
new file mode 100644
index 0000000..b0bee6d
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4-xattr-no-update-ctime-2.6.32-vanilla.patch
@@ -0,0 +1,32 @@
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:06.385666114 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:10:12.449674842 +0200
+@@ -1503,6 +1503,13 @@
+ #define EXT4_MAX_DIR_SIZE_NAME		"max_dir_size"
+ 
+ /*
++ * Indicates that ctime should not be updated in ext4_xattr_set_handle()
++ */
++#ifndef XATTR_NO_CTIME
++#define XATTR_NO_CTIME 0x80
++#endif
++
++/*
+  * Function prototypes
+  */
+ 
+Index: linux-source-2.6.32/fs/ext4/xattr.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/xattr.c	2012-06-28 12:08:30.285665815 +0200
++++ linux-source-2.6.32/fs/ext4/xattr.c	2012-06-28 12:10:12.481658638 +0200
+@@ -1045,7 +1045,8 @@
+ 	}
+ 	if (!error) {
+ 		ext4_xattr_update_super_block(handle, inode->i_sb);
+-		inode->i_ctime = ext4_current_time(inode);
++		if (!(flags & XATTR_NO_CTIME))
++			inode->i_ctime = ext4_current_time(inode);
+ 		if (!value)
+ 			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ 		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
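
[A caller that wants to update an xattr without perturbing ctime (as Lustre
does for its server-side metadata) passes the new flag through the ordinary
entry point; a sketch with a hypothetical attribute name:

    err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_TRUSTED,
                                "lma", buf, buflen, XATTR_NO_CTIME);

The flag shares the flags argument with XATTR_CREATE/XATTR_REPLACE, which is
why it is defined well away from those bits (0x80).]
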
diff --git a/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch
new file mode 100644
index 0000000..d99ae18
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4_data_in_dirent-2.6.32-vanilla.patch
@@ -0,0 +1,521 @@
+This patch implements a feature which allows ext4 filesystem users (e.g.
+Lustre) to store data in the ext4 dirent. The data is stored after the
+file name, and this space is accounted for in de->rec_len. The flag
+EXT4_DIRENT_LUFID is added to d_type if extra data is present.
+
+It makes use of dentry->d_fsdata to pass the fid to ext4, so no changes
+to the ext4_add_entry() interface are required.
+
+Index: linux-source-2.6.32/fs/ext4/dir.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/dir.c	2012-06-28 12:08:15.765666233 +0200
++++ linux-source-2.6.32/fs/ext4/dir.c	2012-06-28 12:11:16.361665139 +0200
+@@ -53,11 +53,18 @@
+ 
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
++	int fl_index = filetype & EXT4_FT_MASK;
++
+ 	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
+-	    (filetype >= EXT4_FT_MAX))
++	    (fl_index >= EXT4_FT_MAX))
+ 		return DT_UNKNOWN;
+ 
+-	return (ext4_filetype_table[filetype]);
++	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
++		return (ext4_filetype_table[fl_index]);
++
++	return (ext4_filetype_table[fl_index]) |
++		(filetype & EXT4_DIRENT_LUFID);
++
+ }
+ 
+ 
+@@ -70,11 +77,11 @@
+ 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
+ 						dir->i_sb->s_blocksize);
+ 
+-	if (rlen < EXT4_DIR_REC_LEN(1))
++	if (rlen < __EXT4_DIR_REC_LEN(1))
+ 		error_msg = "rec_len is smaller than minimal";
+ 	else if (rlen % 4 != 0)
+ 		error_msg = "rec_len % 4 != 0";
+-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
++	else if (rlen < EXT4_DIR_REC_LEN(de))
+ 		error_msg = "rec_len is too small for name_len";
+ 	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ 		error_msg = "directory entry across blocks";
+@@ -181,7 +188,7 @@
+ 				 * failure will be detected in the
+ 				 * dirent test below. */
+ 				if (ext4_rec_len_from_disk(de->rec_len,
+-					sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
++					sb->s_blocksize) < __EXT4_DIR_REC_LEN(1))
+ 					break;
+ 				i += ext4_rec_len_from_disk(de->rec_len,
+ 							    sb->s_blocksize);
+@@ -344,12 +351,17 @@
+ 	struct fname *fname, *new_fn;
+ 	struct dir_private_info *info;
+ 	int len;
++	int extra_data = 1;
+ 
+ 	info = (struct dir_private_info *) dir_file->private_data;
+ 	p = &info->root.rb_node;
+ 
+ 	/* Create and allocate the fname structure */
+-	len = sizeof(struct fname) + dirent->name_len + 1;
++	if (dirent->file_type & EXT4_DIRENT_LUFID)
++		extra_data = ext4_get_dirent_data_len(dirent);
++
++	len = sizeof(struct fname) + dirent->name_len + extra_data;
++
+ 	new_fn = kzalloc(len, GFP_KERNEL);
+ 	if (!new_fn)
+ 		return -ENOMEM;
+@@ -358,7 +370,7 @@
+ 	new_fn->inode = le32_to_cpu(dirent->inode);
+ 	new_fn->name_len = dirent->name_len;
+ 	new_fn->file_type = dirent->file_type;
+-	memcpy(new_fn->name, dirent->name, dirent->name_len);
++	memcpy(new_fn->name, dirent->name, dirent->name_len + extra_data);
+ 	new_fn->name[dirent->name_len] = 0;
+ 
+ 	while (*p) {
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:10:58.337671542 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:16.361665139 +0200
+@@ -1258,6 +1258,7 @@
+ #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
+ #define EXT4_FEATURE_INCOMPAT_MMP               0x0100
+ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
++#define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
+ 
+ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
+@@ -1266,7 +1267,9 @@
+ 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
+ 					 EXT4_FEATURE_INCOMPAT_64BIT| \
+ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+-					 EXT4_FEATURE_INCOMPAT_MMP)
++					 EXT4_FEATURE_INCOMPAT_MMP| \
++					 EXT4_FEATURE_INCOMPAT_DIRDATA)
++
+ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+ 					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
+@@ -1348,6 +1351,43 @@
+ #define EXT4_FT_SYMLINK		7
+ 
+ #define EXT4_FT_MAX		8
++#define EXT4_FT_MASK		0xf
++
++#if EXT4_FT_MAX > EXT4_FT_MASK
++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
++#endif
++
++/*
++ * d_type has 4 unused bits, so it can hold four types of data. These
++ * different types (e.g. Lustre data, high 32 bits of a 64-bit inode number)
++ * can be stored, in flag order, after the file name in the ext4 dirent.
++ */
++/*
++ * This flag is added to d_type if the ext4 dirent has extra data after the
++ * filename. The data length is variable and is stored in the first byte of
++ * the data, which starts after the filename's NUL byte.
++ * This is used by the Lustre filesystem.
++ */
++#define EXT4_DIRENT_LUFID		0x10
++
++#define EXT4_LUFID_MAGIC    0xAD200907UL
++struct ext4_dentry_param {
++	__u32  edp_magic;	/* EXT4_LUFID_MAGIC */
++	char   edp_len;		/* size of edp_data in bytes */
++	char   edp_data[0];	/* packed array of data */
++} __attribute__((packed));
++
++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
++		struct ext4_dentry_param* p)
++
++{
++	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_DIRDATA))
++		return NULL;
++	if (p && p->edp_magic == EXT4_LUFID_MAGIC)
++		return &p->edp_len;
++	else
++		return NULL;
++}
+ 
+ /*
+  * EXT4_DIR_PAD defines the directory entries boundaries
+@@ -1356,8 +1396,11 @@
+  */
+ #define EXT4_DIR_PAD			4
+ #define EXT4_DIR_ROUND			(EXT4_DIR_PAD - 1)
+-#define EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT4_DIR_ROUND) & \
++#define __EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT4_DIR_ROUND) & \
+ 					 ~EXT4_DIR_ROUND)
++#define EXT4_DIR_REC_LEN(de)		(__EXT4_DIR_REC_LEN(de->name_len +\
++					ext4_get_dirent_data_len(de)))
++
+ #define EXT4_MAX_REC_LEN		((1<<16)-1)
+ 
+ /*
+@@ -1684,7 +1727,7 @@
+ 					    struct ext4_dir_entry_2 ** res_dir);
+ #define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
+ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+-			       struct inode *inode);
++			       struct inode *inode, const void *, const void *);
+ extern struct buffer_head *ext4_append(handle_t *handle,
+ 				       struct inode *inode,
+ 				       ext4_lblk_t *block, int *err);
+@@ -2015,6 +2058,28 @@
+ 
+ #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
+ 
++/*
++ * Compute the total directory entry data length.
++ * This includes the filename and an implicit NUL terminator (always present),
++ * and optional extensions.  Each extension has a bit set in the high 4 bits of
++ * de->file_type, and the extension length is the first byte in each entry.
++ */
++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
++{
++	char *len = de->name + de->name_len + 1 /* NUL terminator */;
++	int dlen = 0;
++	__u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
++
++	while (extra_data_flags) {
++		if (extra_data_flags & 1) {
++			dlen += *len + (dlen == 0);
++			len += *len;
++		}
++		extra_data_flags >>= 1;
++	}
++	return dlen;
++}
++
+ #endif	/* __KERNEL__ */
+ 
+ #endif	/* _EXT4_H */
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:10:52.005665948 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:16.365664904 +0200
+@@ -169,7 +169,8 @@
+ static unsigned dx_get_limit(struct dx_entry *entries);
+ static void dx_set_count(struct dx_entry *entries, unsigned value);
+ static void dx_set_limit(struct dx_entry *entries, unsigned value);
+-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
++static inline unsigned dx_root_limit(__u32 blocksize,
++		struct ext4_dir_entry_2 *dot_de, unsigned infosize);
+ static unsigned dx_node_limit(struct inode *dir);
+ static struct dx_frame *dx_probe(const struct qstr *d_name,
+ 				 struct inode *dir,
+@@ -236,11 +237,12 @@
+  */
+ struct dx_root_info * dx_get_dx_info(struct ext4_dir_entry_2 *de)
+ {
+-       /* get dotdot first */
+-       de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++	BUG_ON(de->name_len != 1);
++	/* get dotdot first */
++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+ 
+-       /* dx root info is after dotdot entry */
+-       de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++	/* dx root info is after dotdot entry */
++	de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(de));
+ 
+        return (struct dx_root_info *) de;
+ }
+@@ -285,16 +287,23 @@
+ 	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+ }
+ 
+-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
++static inline unsigned dx_root_limit(__u32 blocksize,
++		struct ext4_dir_entry_2 *dot_de, unsigned infosize)
+ {
+-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+-		EXT4_DIR_REC_LEN(2) - infosize;
++	struct ext4_dir_entry_2 *dotdot_de;
++	unsigned entry_space;
++
++	BUG_ON(dot_de->name_len != 1);
++	dotdot_de = ext4_next_entry(dot_de, blocksize);
++	entry_space = blocksize - EXT4_DIR_REC_LEN(dot_de) -
++			 EXT4_DIR_REC_LEN(dotdot_de) - infosize;
++
+ 	return entry_space / sizeof(struct dx_entry);
+ }
+ 
+ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
++	unsigned entry_space = dir->i_sb->s_blocksize - __EXT4_DIR_REC_LEN(0);
+ 	return entry_space / sizeof(struct dx_entry);
+ }
+ 
+@@ -341,7 +350,7 @@
+ 				printk(":%x.%u ", h.hash,
+ 				       ((char *) de - base));
+ 			}
+-			space += EXT4_DIR_REC_LEN(de->name_len);
++			space += EXT4_DIR_REC_LEN(de);
+ 			names++;
+ 		}
+ 		de = ext4_next_entry(de, size);
+@@ -445,7 +454,8 @@
+ 
+ 	entries = (struct dx_entry *) (((char *)info) + info->info_length);
+ 
+-	if (dx_get_limit(entries) != dx_root_limit(dir,
++	if (dx_get_limit(entries) != dx_root_limit(dir->i_sb->s_blocksize,
++						   (struct ext4_dir_entry_2*)bh->b_data,
+ 						   info->info_length)) {
+ 		ext4_warning(dir->i_sb, __func__,
+ 			     "dx entry: limit != root limit");
+@@ -635,7 +645,7 @@
+ 	de = (struct ext4_dir_entry_2 *) bh->b_data;
+ 	top = (struct ext4_dir_entry_2 *) ((char *) de +
+ 					   dir->i_sb->s_blocksize -
+-					   EXT4_DIR_REC_LEN(0));
++					   __EXT4_DIR_REC_LEN(0));
+ 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
+ 		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+ 					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+@@ -1048,7 +1058,7 @@
+ 			goto errout;
+ 		de = (struct ext4_dir_entry_2 *) bh->b_data;
+ 		top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
+-				       EXT4_DIR_REC_LEN(0));
++				       __EXT4_DIR_REC_LEN(0));
+ 		for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
+ 			int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
+ 				  + ((char *) de - bh->b_data);
+@@ -1210,7 +1220,7 @@
+ 	while (count--) {
+ 		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) 
+ 						(from + (map->offs<<2));
+-		rec_len = EXT4_DIR_REC_LEN(de->name_len);
++		rec_len = EXT4_DIR_REC_LEN(de);
+ 		memcpy (to, de, rec_len);
+ 		((struct ext4_dir_entry_2 *) to)->rec_len =
+ 				ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1234,7 +1244,7 @@
+ 	while ((char*)de < base + blocksize) {
+ 		next = ext4_next_entry(de, blocksize);
+ 		if (de->inode && de->name_len) {
+-			rec_len = EXT4_DIR_REC_LEN(de->name_len);
++			rec_len = EXT4_DIR_REC_LEN(de);
+ 			if (de > to)
+ 				memmove(to, de, rec_len);
+ 			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1364,10 +1374,16 @@
+ 	unsigned int	offset = 0;
+ 	unsigned int	blocksize = dir->i_sb->s_blocksize;
+ 	unsigned short	reclen;
+-	int		nlen, rlen, err;
++	int		nlen, rlen, err, dlen = 0;
++	unsigned char	*data;
+ 	char		*top;
+ 
+-	reclen = EXT4_DIR_REC_LEN(namelen);
++	data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
++						dentry->d_fsdata);
++	if (data)
++		dlen = (*data) + 1;
++
++	reclen = __EXT4_DIR_REC_LEN(namelen + dlen);
+ 	if (!de) {
+ 		de = (struct ext4_dir_entry_2 *)bh->b_data;
+ 		top = bh->b_data + blocksize - reclen;
+@@ -1377,7 +1393,7 @@
+ 				return -EIO;
+ 			if (ext4_match(namelen, name, de))
+ 				return -EEXIST;
+-			nlen = EXT4_DIR_REC_LEN(de->name_len);
++			nlen = EXT4_DIR_REC_LEN(de);
+ 			rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
+ 			if ((de->inode? rlen - nlen: rlen) >= reclen)
+ 				break;
+@@ -1395,7 +1411,7 @@
+ 	}
+ 
+ 	/* By now the buffer is marked for journaling */
+-	nlen = EXT4_DIR_REC_LEN(de->name_len);
++	nlen = EXT4_DIR_REC_LEN(de);
+ 	rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
+ 	if (de->inode) {
+ 		struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
+@@ -1411,6 +1427,12 @@
+ 		de->inode = 0;
+ 	de->name_len = namelen;
+ 	memcpy(de->name, name, namelen);
++	if (data) {
++		de->name[namelen] = 0;
++		memcpy(&de->name[namelen + 1], data, *(char *) data);
++		de->file_type |= EXT4_DIRENT_LUFID;
++	}
++
+ 	/*
+ 	 * XXX shouldn't update any times until successful
+ 	 * completion of syscall, but too many callers depend
+@@ -1508,7 +1530,8 @@
+ 
+ 	dx_set_block(entries, 1);
+ 	dx_set_count(entries, 1);
+-	dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
++	dx_set_limit(entries, dx_root_limit(dir->i_sb->s_blocksize,
++					 dot_de, sizeof(root->info)));
+ 
+ 	/* Initialize as for dx_probe */
+ 	hinfo.hash_version = dx_info->hash_version;
+@@ -1539,6 +1562,8 @@
+ 	struct buffer_head * dir_block;
+ 	struct ext4_dir_entry_2 * de;
+ 	int len, journal = 0, err = 0;
++	int dlen = 0;
++	char *data;
+ 
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+@@ -1554,19 +1579,24 @@
+ 	/* the first item must be "." */
+ 	assert(de->name_len == 1 && de->name[0] == '.');
+ 	len = le16_to_cpu(de->rec_len);
+-	assert(len >= EXT4_DIR_REC_LEN(1));
+-	if (len > EXT4_DIR_REC_LEN(1)) {
++	assert(len >= __EXT4_DIR_REC_LEN(1));
++	if (len > __EXT4_DIR_REC_LEN(1)) {
+ 		BUFFER_TRACE(dir_block, "get_write_access");
+ 		err = ext4_journal_get_write_access(handle, dir_block);
+ 		if (err)
+ 			goto out_journal;
+ 
+ 		journal = 1;
+-		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++		de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
+ 	}
+ 
+-	len -= EXT4_DIR_REC_LEN(1);
+-	assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++	len -= EXT4_DIR_REC_LEN(de);
++	data = ext4_dentry_get_data(dir->i_sb,
++			(struct ext4_dentry_param *) dentry->d_fsdata);
++	if (data)
++		dlen = *data + 1;
++	assert(len == 0 || len >= __EXT4_DIR_REC_LEN(2 + dlen));
++
+ 	de = (struct ext4_dir_entry_2 *)
+ 			((char *) de + le16_to_cpu(de->rec_len));
+ 	if (!journal) {
+@@ -1580,10 +1610,15 @@
+ 	if (len > 0)
+ 		de->rec_len = cpu_to_le16(len);
+ 	else
+-		assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
++		assert(le16_to_cpu(de->rec_len) >= __EXT4_DIR_REC_LEN(2));
+ 	de->name_len = 2;
+ 	strcpy (de->name, "..");
+ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++	if (data) {
++		de->name[2] = 0;
++		memcpy(&de->name[2 + 1], data, dlen);
++		de->file_type |= EXT4_DIRENT_LUFID;
++	}
+ 
+ out_journal:
+ 	if (journal) {
+@@ -2008,12 +2043,13 @@
+ /* Initialize @inode as a subdirectory of @dir, and add the
+  * "." and ".." entries into the first directory block. */
+ int ext4_add_dot_dotdot(handle_t *handle, struct inode * dir,
+-			struct inode *inode)
++			struct inode *inode,
++			const void *data1, const void *data2)
+ {
+ 	struct buffer_head * dir_block;
+ 	struct ext4_dir_entry_2 * de;
+ 	unsigned int blocksize = dir->i_sb->s_blocksize;
+-	int err = 0;
++	int err = 0, dot_reclen;
+ 
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+@@ -2026,28 +2062,40 @@
+ 	inode->i_fop = &ext4_dir_operations;
+ 	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ 	dir_block = ext4_bread(handle, inode, 0, 1, &err);
+-	if (!dir_block) {
+-		clear_nlink(inode);
+-		ext4_mark_inode_dirty(handle, inode);
+-		iput (inode);
++	if (!dir_block)
+ 		goto get_out;
+-	}
++
+ 	BUFFER_TRACE(dir_block, "get_write_access");
+ 	ext4_journal_get_write_access(handle, dir_block);
+ 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
+ 	de->inode = cpu_to_le32(inode->i_ino);
+ 	de->name_len = 1;
+-	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+-					   blocksize);
+ 	strcpy(de->name, ".");
+ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++	/* get packed fid data */
++	data1 = ext4_dentry_get_data(dir->i_sb,
++				(struct ext4_dentry_param *) data1);
++	if (data1) {
++		de->name[1] = 0;
++		memcpy(&de->name[2], data1, *(char *) data1);
++		de->file_type |= EXT4_DIRENT_LUFID;
++	}
++	de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de));
++	dot_reclen = cpu_to_le16(de->rec_len);
+ 	de = ext4_next_entry(de, blocksize);
+ 	de->inode = cpu_to_le32(dir->i_ino);
+-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
++	de->rec_len = ext4_rec_len_to_disk(blocksize - dot_reclen,
+ 					   blocksize);
+ 	de->name_len = 2;
+ 	strcpy(de->name, "..");
+ 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++	data2 = ext4_dentry_get_data(dir->i_sb,
++			(struct ext4_dentry_param *) data2);
++	if (data2) {
++		de->name[2] = 0;
++		memcpy(&de->name[3], data2, *(char *) data2);
++		de->file_type |= EXT4_DIRENT_LUFID;
++	}
+ 	inode->i_nlink = 2;
+ 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
+ 	ext4_handle_dirty_metadata(handle, dir, dir_block);
+@@ -2084,9 +2132,12 @@
+ 	if (IS_ERR(inode))
+ 		goto out_stop;
+ 
+-	err = ext4_add_dot_dotdot(handle, dir, inode);
++	err = ext4_add_dot_dotdot(handle, dir, inode, NULL, NULL);
+ 	if (err) {
++		clear_nlink(inode);
+ 		unlock_new_inode(inode);
++		ext4_mark_inode_dirty(handle, inode);
++		iput (inode);
+ 		goto out_stop;
+ 	}
+ 
+@@ -2122,7 +2173,7 @@
+ 	int err = 0;
+ 
+ 	sb = inode->i_sb;
+-	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
++	if (inode->i_size < __EXT4_DIR_REC_LEN(1) + __EXT4_DIR_REC_LEN(2) ||
+ 	    !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
+ 		if (err)
+ 			ext4_error(inode->i_sb, __func__,
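
[Putting the pieces of the patch above together: a dirent carrying extra data
is laid out as inode, rec_len, name_len, file_type (with EXT4_DIRENT_LUFID
set), the name, its NUL terminator, then one length byte followed by the
payload, where the length byte counts itself, as ext4_get_dirent_data_len()
assumes. A reader sketch with a hypothetical helper:

    static const char *dirent_lufid(struct ext4_dir_entry_2 *de, int *lenp)
    {
            const char *p;

            if (!(de->file_type & EXT4_DIRENT_LUFID))
                    return NULL;             /* no extra data present */
            p = de->name + de->name_len + 1; /* skip name and NUL byte */
            *lenp = *p - 1;                  /* length byte counts itself */
            return p + 1;                    /* payload */
    }
]
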
diff --git a/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch b/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch
new file mode 100644
index 0000000..7166474
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/ext4_pdirop-2.6.32-vanilla.patch
@@ -0,0 +1,2272 @@
+Index: linux-source-2.6.32/include/linux/htree_lock.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/include/linux/htree_lock.h	2012-06-28 12:11:51.973666178 +0200
+@@ -0,0 +1,187 @@
++/*
++ * include/linux/htree_lock.h
++ *
++ * Copyright (c) 2011, Whamcloud, Inc.
++ *
++ * Author: Liang Zhen <liang at whamcloud.com>
++ */
++
++/*
++ * htree lock
++ *
++ * htree_lock is an advanced lock; it supports five lock modes (a concept
++ * taken from DLM) and it is a sleeping lock.
++ *
++ * The most common use case is:
++ * - create a htree_lock_head for the data
++ * - each thread (contender) creates its own htree_lock
++ * - a contender calls htree_lock(lock_node, mode) to protect the data and
++ *   calls htree_unlock to release the lock
++ *
++ * There is also a more complex, advanced use case: a user can hold a
++ * PW/PR lock on a particular key; this is mostly used while the user is
++ * holding a shared lock on the htree (CW, CR)
++ *
++ * htree_lock(lock_node, HTREE_LOCK_CR); lock the htree with CR
++ * htree_node_lock(lock_node, HTREE_LOCK_PR, key...); lock @key with PR
++ * ...
++ * htree_node_unlock(lock_node); unlock the key
++ *
++ * Note that we can have N levels of this kind of key; all we need to do
++ * is specify N levels while creating the htree_lock_head, then we can
++ * lock/unlock a specific level by:
++ * htree_node_lock(lock_node, mode1, key1, level1...);
++ * do something;
++ * htree_node_lock(lock_node, mode1, key2, level2...);
++ * do something;
++ * htree_node_unlock(lock_node, level2);
++ * htree_node_unlock(lock_node, level1);
++ *
++ * NB: for multi-level, should be careful about locking order to avoid deadlock
++ */
++
++#ifndef _LINUX_HTREE_LOCK_H
++#define _LINUX_HTREE_LOCK_H
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++
++/*
++ * Lock Modes
++ * more details can be found here:
++ * http://en.wikipedia.org/wiki/Distributed_lock_manager
++ */
++typedef enum {
++	HTREE_LOCK_EX	= 0, /* exclusive lock: incompatible with all others */
++	HTREE_LOCK_PW,	     /* protected write: allows only CR users */
++	HTREE_LOCK_PR,	     /* protected read: allow PR, CR users */
++	HTREE_LOCK_CW,	     /* concurrent write: allow CR, CW users */
++	HTREE_LOCK_CR,	     /* concurrent read: allow all but EX users */
++	HTREE_LOCK_MAX,	     /* number of lock modes */
++} htree_lock_mode_t;
++
++#define HTREE_LOCK_NL		HTREE_LOCK_MAX
++#define HTREE_LOCK_INVAL	0xdead10c
++
++enum {
++	HTREE_HBITS_MIN		= 2,
++	HTREE_HBITS_DEF		= 14,
++	HTREE_HBITS_MAX		= 32,
++};
++
++enum {
++	HTREE_EVENT_DISABLE	= (0),
++	HTREE_EVENT_RD		= (1 << HTREE_LOCK_PR),
++	HTREE_EVENT_WR		= (1 << HTREE_LOCK_PW),
++	HTREE_EVENT_RDWR	= (HTREE_EVENT_RD | HTREE_EVENT_WR),
++};
++
++struct htree_lock;
++
++typedef void (*htree_event_cb_t)(void *target, void *event);
++
++struct htree_lock_child {
++	struct list_head	lc_list;	/* granted list */
++	htree_event_cb_t	lc_callback;	/* event callback */
++	unsigned		lc_events;	/* event types */
++};
++
++struct htree_lock_head {
++	unsigned long		lh_lock;	/* bits lock */
++	/* blocked lock list (htree_lock) */
++	struct list_head	lh_blocked_list;
++	/* # key levels */
++	u16			lh_depth;
++	/* hash bits for key and limit number of locks */
++	u16			lh_hbits;
++	/* counters for blocked locks */
++	u16			lh_nblocked[HTREE_LOCK_MAX];
++	/* counters for granted locks */
++	u16			lh_ngranted[HTREE_LOCK_MAX];
++	/* private data */
++	void			*lh_private;
++	/* array of children locks */
++	struct htree_lock_child	lh_children[0];
++};
++
++/* htree_lock_node_t is child-lock for a specific key (ln_value) */
++struct htree_lock_node {
++	htree_lock_mode_t	ln_mode;
++	/* major hash key */
++	u16			ln_major_key;
++	/* minor hash key */
++	u16			ln_minor_key;
++	struct list_head	ln_major_list;
++	struct list_head	ln_minor_list;
++	/* alive list, all locks (granted, blocked, listening) are on it */
++	struct list_head	ln_alive_list;
++	/* blocked list */
++	struct list_head	ln_blocked_list;
++	/* granted list */
++	struct list_head	ln_granted_list;
++	void			*ln_ev_target;
++};
++
++struct htree_lock {
++	struct task_struct	*lk_task;
++	struct htree_lock_head	*lk_head;
++	void			*lk_private;
++	unsigned		lk_depth;
++	htree_lock_mode_t	lk_mode;
++	struct list_head	lk_blocked_list;
++	struct htree_lock_node	lk_nodes[0];
++};
++
++/* create a lock head, which stands for a resource */
++struct htree_lock_head *htree_lock_head_alloc(unsigned depth,
++					      unsigned hbits, unsigned priv);
++/* free a lock head */
++void htree_lock_head_free(struct htree_lock_head *lhead);
++/* register event callback for child lock at level @depth */
++void htree_lock_event_attach(struct htree_lock_head *lhead, unsigned depth,
++			     unsigned events, htree_event_cb_t callback);
++/* create a lock handle, which stands for a thread */
++struct htree_lock *htree_lock_alloc(unsigned depth, unsigned pbytes);
++/* free a lock handle */
++void htree_lock_free(struct htree_lock *lck);
++/* lock htree; 0 is returned if @wait is false and the lock can't
++ * be granted immediately */
++int htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++		   htree_lock_mode_t mode, int wait);
++/* unlock htree */
++void htree_unlock(struct htree_lock *lck);
++/* unlock and relock htree with @new_mode */
++int htree_change_lock_try(struct htree_lock *lck,
++			  htree_lock_mode_t new_mode, int wait);
++void htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode);
++/* acquire child lock (key) of htree at level @dep; @event will be sent to
++ * all listeners on this @key while the lock is being granted */
++int htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++			u32 key, unsigned dep, int wait, void *event);
++/* release child lock at level @dep; this lock will keep listening on its key
++ * if @event isn't NULL, and event_cb will be called against @lck while
++ * granting any other lock at level @dep with the same key */
++void htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event);
++/* stop listening on child lock at level @dep */
++void htree_node_stop_listen(struct htree_lock *lck, unsigned dep);
++/* for debug */
++void htree_lock_stat_print(int depth);
++void htree_lock_stat_reset(void);
++
++#define htree_lock(lck, lh, mode)	htree_lock_try(lck, lh, mode, 1)
++#define htree_change_lock(lck, mode)	htree_change_lock_try(lck, mode, 1)
++
++#define htree_lock_mode(lck)		((lck)->lk_mode)
++
++#define htree_node_lock(lck, mode, key, dep)	\
++	htree_node_lock_try(lck, mode, key, dep, 1, NULL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_granted(lck, dep)		\
++	((lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_INVAL && \
++	 (lck)->lk_nodes[dep].ln_mode != HTREE_LOCK_NL)
++/* this is only safe in thread context of lock owner */
++#define htree_node_is_listening(lck, dep)	\
++	((lck)->lk_nodes[dep].ln_mode == HTREE_LOCK_NL)
++
++#endif
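
For reference, a minimal consumer of this API might look like the following
sketch (hypothetical caller code, not part of the patchset; it assumes a
single key level and the default hash bits, and omits error handling):

	/* one lock head per shared resource, one lock handle per thread */
	struct htree_lock_head *lhead;
	struct htree_lock *lck;
	u32 key = 42;	/* hypothetical key identifying one tree node */

	lhead = htree_lock_head_alloc(1, HTREE_HBITS_DEF, 0);
	lck = htree_lock_alloc(1, 0);

	htree_lock(lck, lhead, HTREE_LOCK_CW);	     /* shared lock on the tree */
	htree_node_lock(lck, HTREE_LOCK_PW, key, 0); /* exclusive lock on @key */
	/* ... modify the data protected by @key ... */
	htree_node_unlock(lck, 0, NULL);
	htree_unlock(lck);

	htree_lock_free(lck);
	htree_lock_head_free(lhead);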
+Index: linux-source-2.6.32/fs/ext4/htree_lock.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-source-2.6.32/fs/ext4/htree_lock.c	2012-06-28 12:11:51.977663925 +0200
+@@ -0,0 +1,880 @@
++/*
++ * fs/ext4/htree_lock.c
++ *
++ * Copyright (c) 2011, Whamcloud, Inc.
++ *
++ * Author: Liang Zhen <liang at whamcloud.com>
++ */
++#include <linux/jbd2.h>
++#include <linux/hash.h>
++#include <linux/module.h>
++#include <linux/htree_lock.h>
++
++enum {
++	HTREE_LOCK_BIT_EX	= (1 << HTREE_LOCK_EX),
++	HTREE_LOCK_BIT_PW	= (1 << HTREE_LOCK_PW),
++	HTREE_LOCK_BIT_PR	= (1 << HTREE_LOCK_PR),
++	HTREE_LOCK_BIT_CW	= (1 << HTREE_LOCK_CW),
++	HTREE_LOCK_BIT_CR	= (1 << HTREE_LOCK_CR),
++};
++
++enum {
++	HTREE_LOCK_COMPAT_EX	= 0,
++	HTREE_LOCK_COMPAT_PW	= HTREE_LOCK_COMPAT_EX | HTREE_LOCK_BIT_CR,
++	HTREE_LOCK_COMPAT_PR	= HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_PR,
++	HTREE_LOCK_COMPAT_CW	= HTREE_LOCK_COMPAT_PW | HTREE_LOCK_BIT_CW,
++	HTREE_LOCK_COMPAT_CR	= HTREE_LOCK_COMPAT_CW | HTREE_LOCK_BIT_PR |
++				  HTREE_LOCK_BIT_PW,
++};
++
++static int htree_lock_compat[] = {
++	[HTREE_LOCK_EX]		HTREE_LOCK_COMPAT_EX,
++	[HTREE_LOCK_PW]		HTREE_LOCK_COMPAT_PW,
++	[HTREE_LOCK_PR]		HTREE_LOCK_COMPAT_PR,
++	[HTREE_LOCK_CW]		HTREE_LOCK_COMPAT_CW,
++	[HTREE_LOCK_CR]		HTREE_LOCK_COMPAT_CR,
++};
++
++/* max allowed htree-lock depth.
++ * We only need depth=3 for ext4, although users can request a higher value. */
++#define HTREE_LOCK_DEP_MAX	16
++
++#ifdef HTREE_LOCK_DEBUG
++
++static char *hl_name[] = {
++	[HTREE_LOCK_EX]		"EX",
++	[HTREE_LOCK_PW]		"PW",
++	[HTREE_LOCK_PR]		"PR",
++	[HTREE_LOCK_CW]		"CW",
++	[HTREE_LOCK_CR]		"CR",
++};
++
++/* lock stats */
++struct htree_lock_node_stats {
++	unsigned long long	blocked[HTREE_LOCK_MAX];
++	unsigned long long	granted[HTREE_LOCK_MAX];
++	unsigned long long	retried[HTREE_LOCK_MAX];
++	unsigned long long	events;
++};
++
++struct htree_lock_stats {
++	struct htree_lock_node_stats	nodes[HTREE_LOCK_DEP_MAX];
++	unsigned long long	granted[HTREE_LOCK_MAX];
++	unsigned long long	blocked[HTREE_LOCK_MAX];
++};
++
++static struct htree_lock_stats hl_stats;
++
++void htree_lock_stat_reset(void)
++{
++	memset(&hl_stats, 0, sizeof(hl_stats));
++}
++
++void htree_lock_stat_print(int depth)
++{
++	int     i;
++	int	j;
++
++	printk(KERN_DEBUG "HTREE LOCK STATS:\n");
++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
++		printk(KERN_DEBUG "[%s]: G [%10llu], B [%10llu]\n",
++		       hl_name[i], hl_stats.granted[i], hl_stats.blocked[i]);
++	}
++	for (i = 0; i < depth; i++) {
++		printk(KERN_DEBUG "HTREE CHILD [%d] STATS:\n", i);
++		for (j = 0; j < HTREE_LOCK_MAX; j++) {
++			printk(KERN_DEBUG
++				"[%s]: G [%10llu], B [%10llu], R [%10llu]\n",
++				hl_name[j], hl_stats.nodes[i].granted[j],
++				hl_stats.nodes[i].blocked[j],
++				hl_stats.nodes[i].retried[j]);
++		}
++	}
++}
++
++#define lk_grant_inc(m)       do { hl_stats.granted[m]++; } while (0)
++#define lk_block_inc(m)       do { hl_stats.blocked[m]++; } while (0)
++#define ln_grant_inc(d, m)    do { hl_stats.nodes[d].granted[m]++; } while (0)
++#define ln_block_inc(d, m)    do { hl_stats.nodes[d].blocked[m]++; } while (0)
++#define ln_retry_inc(d, m)    do { hl_stats.nodes[d].retried[m]++; } while (0)
++#define ln_event_inc(d)       do { hl_stats.nodes[d].events++; } while (0)
++
++#else /* !DEBUG */
++
++void htree_lock_stat_reset(void) {}
++void htree_lock_stat_print(int depth) {}
++
++#define lk_grant_inc(m)	      do {} while (0)
++#define lk_block_inc(m)	      do {} while (0)
++#define ln_grant_inc(d, m)    do {} while (0)
++#define ln_block_inc(d, m)    do {} while (0)
++#define ln_retry_inc(d, m)    do {} while (0)
++#define ln_event_inc(d)	      do {} while (0)
++
++#endif /* DEBUG */
++
++EXPORT_SYMBOL(htree_lock_stat_reset);
++EXPORT_SYMBOL(htree_lock_stat_print);
++
++#define HTREE_DEP_ROOT		  (-1)
++
++#define htree_spin_lock(lhead, dep)				\
++	bit_spin_lock((dep) + 1, &(lhead)->lh_lock)
++#define htree_spin_unlock(lhead, dep)				\
++	bit_spin_unlock((dep) + 1, &(lhead)->lh_lock)
++
++#define htree_key_event_ignore(child, ln)			\
++	(!((child)->lc_events & (1 << (ln)->ln_mode)))
++
++static int
++htree_key_list_empty(struct htree_lock_node *ln)
++{
++	return list_empty(&ln->ln_major_list) && list_empty(&ln->ln_minor_list);
++}
++
++static void
++htree_key_list_del_init(struct htree_lock_node *ln)
++{
++	struct htree_lock_node *tmp = NULL;
++
++	if (!list_empty(&ln->ln_minor_list)) {
++		tmp = list_entry(ln->ln_minor_list.next,
++				 struct htree_lock_node, ln_minor_list);
++		list_del_init(&ln->ln_minor_list);
++	}
++
++	if (list_empty(&ln->ln_major_list))
++		return;
++
++	if (tmp == NULL) { /* not on minor key list */
++		list_del_init(&ln->ln_major_list);
++	} else {
++		BUG_ON(!list_empty(&tmp->ln_major_list));
++		list_replace_init(&ln->ln_major_list, &tmp->ln_major_list);
++	}
++}
++
++static void
++htree_key_list_replace_init(struct htree_lock_node *old,
++			    struct htree_lock_node *new)
++{
++	if (!list_empty(&old->ln_major_list))
++		list_replace_init(&old->ln_major_list, &new->ln_major_list);
++
++	if (!list_empty(&old->ln_minor_list))
++		list_replace_init(&old->ln_minor_list, &new->ln_minor_list);
++}
++
++static void
++htree_key_event_enqueue(struct htree_lock_child *child,
++			struct htree_lock_node *ln, int dep, void *event)
++{
++	struct htree_lock_node *tmp;
++
++	/* NB: ALWAYS called holding lhead::lh_lock(dep) */
++	BUG_ON(ln->ln_mode == HTREE_LOCK_NL);
++	if (event == NULL || htree_key_event_ignore(child, ln))
++		return;
++
++	/* shouldn't be a very long list */
++	list_for_each_entry(tmp, &ln->ln_alive_list, ln_alive_list) {
++		if (tmp->ln_mode == HTREE_LOCK_NL) {
++			ln_event_inc(dep);
++			if (child->lc_callback != NULL)
++				child->lc_callback(tmp->ln_ev_target, event);
++		}
++	}
++}
++
++static int
++htree_node_lock_enqueue(struct htree_lock *newlk, struct htree_lock *curlk,
++			unsigned dep, int wait, void *event)
++{
++	struct htree_lock_child *child = &newlk->lk_head->lh_children[dep];
++	struct htree_lock_node *newln = &newlk->lk_nodes[dep];
++	struct htree_lock_node *curln = &curlk->lk_nodes[dep];
++
++	/* NB: ALWAYS called holding lhead::lh_lock(dep) */
++	/* NB: we only expect PR/PW lock modes here; only these two modes are
++	 * allowed for htree_node_lock (asserted in htree_node_lock_internal).
++	 * NL is only used for listeners; users can't directly request NL mode */
++	if ((curln->ln_mode == HTREE_LOCK_NL) ||
++	    (curln->ln_mode != HTREE_LOCK_PW &&
++	     newln->ln_mode != HTREE_LOCK_PW)) {
++		/* no conflict, attach it on granted list of @curlk */
++		if (curln->ln_mode != HTREE_LOCK_NL) {
++			list_add(&newln->ln_granted_list,
++				 &curln->ln_granted_list);
++		} else {
++			/* replace key owner */
++			htree_key_list_replace_init(curln, newln);
++		}
++
++		list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++		htree_key_event_enqueue(child, newln, dep, event);
++		ln_grant_inc(dep, newln->ln_mode);
++		return 1; /* still hold lh_lock */
++	}
++
++	if (!wait) { /* can't grant and don't want to wait */
++		ln_retry_inc(dep, newln->ln_mode);
++		newln->ln_mode = HTREE_LOCK_INVAL;
++		return -1; /* don't wait and just return -1 */
++	}
++
++	newlk->lk_task = current;
++	set_current_state(TASK_UNINTERRUPTIBLE);
++	/* conflict, attach it on blocked list of curlk */
++	list_add_tail(&newln->ln_blocked_list, &curln->ln_blocked_list);
++	list_add(&newln->ln_alive_list, &curln->ln_alive_list);
++	ln_block_inc(dep, newln->ln_mode);
++
++	htree_spin_unlock(newlk->lk_head, dep);
++	/* wait to be given the lock */
++	if (newlk->lk_task != NULL)
++		schedule();
++	/* granted, no doubt, wake up will set me RUNNING */
++	if (event == NULL || htree_key_event_ignore(child, newln))
++		return 0; /* granted without lh_lock */
++
++	htree_spin_lock(newlk->lk_head, dep);
++	htree_key_event_enqueue(child, newln, dep, event);
++	return 1; /* still hold lh_lock */
++}
++
++/*
++ * get PR/PW access to a particular tree node according to @dep and @key;
++ * it returns -1 if @wait is false and the lock can't be granted immediately.
++ * All listeners (HTREE_LOCK_NL) on @dep and with the same @key will get
++ * @event if it's not NULL.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_node_lock_internal(struct htree_lock_head *lhead, struct htree_lock *lck,
++			 htree_lock_mode_t mode, u32 key, unsigned dep,
++			 int wait, void *event)
++{
++	LIST_HEAD		(list);
++	struct htree_lock	*tmp;
++	struct htree_lock	*tmp2;
++	u16			major;
++	u16			minor;
++	u8			reverse;
++	u8			ma_bits;
++	u8			mi_bits;
++
++	BUG_ON(mode != HTREE_LOCK_PW && mode != HTREE_LOCK_PR);
++	BUG_ON(htree_node_is_granted(lck, dep));
++
++	key = hash_long(key, lhead->lh_hbits);
++
++	mi_bits = lhead->lh_hbits >> 1;
++	ma_bits = lhead->lh_hbits - mi_bits;
++
++	lck->lk_nodes[dep].ln_major_key = major = key & ((1U << ma_bits) - 1);
++	lck->lk_nodes[dep].ln_minor_key = minor = key >> ma_bits;
++	lck->lk_nodes[dep].ln_mode = mode;
++
++	/*
++	 * The major key list is an ordered list, so searches are started
++	 * at the end of the list that is numerically closer to major_key,
++	 * so at most half of the list will be walked (for well-distributed
++	 * keys). The list traversal aborts early if the expected key
++	 * location is passed.
++	 */
++	reverse = (major >= (1 << (ma_bits - 1)));
++
++	if (reverse) {
++		list_for_each_entry_reverse(tmp,
++					&lhead->lh_children[dep].lc_list,
++					lk_nodes[dep].ln_major_list) {
++			if (tmp->lk_nodes[dep].ln_major_key == major) {
++				goto search_minor;
++
++			} else if (tmp->lk_nodes[dep].ln_major_key < major) {
++				/* attach _after_ @tmp */
++				list_add(&lck->lk_nodes[dep].ln_major_list,
++					 &tmp->lk_nodes[dep].ln_major_list);
++				goto out_grant_major;
++			}
++		}
++
++		list_add(&lck->lk_nodes[dep].ln_major_list,
++			 &lhead->lh_children[dep].lc_list);
++		goto out_grant_major;
++
++	} else {
++		list_for_each_entry(tmp, &lhead->lh_children[dep].lc_list,
++				    lk_nodes[dep].ln_major_list) {
++			if (tmp->lk_nodes[dep].ln_major_key == major) {
++				goto search_minor;
++
++			} else if (tmp->lk_nodes[dep].ln_major_key > major) {
++				/* insert _before_ @tmp */
++				list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++					&tmp->lk_nodes[dep].ln_major_list);
++				goto out_grant_major;
++			}
++		}
++
++		list_add_tail(&lck->lk_nodes[dep].ln_major_list,
++			      &lhead->lh_children[dep].lc_list);
++		goto out_grant_major;
++	}
++
++ search_minor:
++	/*
++	 * NB: the minor_key list doesn't have a "head"; @list is just a
++	 * temporary stub to help the list search, so make sure it is
++	 * removed after searching.
++	 * The minor_key list is an ordered list too.
++	 */
++	list_add_tail(&list, &tmp->lk_nodes[dep].ln_minor_list);
++
++	reverse = (minor >= (1 << (mi_bits - 1)));
++
++	if (reverse) {
++		list_for_each_entry_reverse(tmp2, &list,
++					    lk_nodes[dep].ln_minor_list) {
++			if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++				goto out_enqueue;
++
++			} else if (tmp2->lk_nodes[dep].ln_minor_key < minor) {
++				/* attach _after_ @tmp2 */
++				list_add(&lck->lk_nodes[dep].ln_minor_list,
++					 &tmp2->lk_nodes[dep].ln_minor_list);
++				goto out_grant_minor;
++			}
++		}
++
++		list_add(&lck->lk_nodes[dep].ln_minor_list, &list);
++
++	} else {
++		list_for_each_entry(tmp2, &list,
++				    lk_nodes[dep].ln_minor_list) {
++			if (tmp2->lk_nodes[dep].ln_minor_key == minor) {
++				goto out_enqueue;
++
++			} else if (tmp2->lk_nodes[dep].ln_minor_key > minor) {
++				/* insert _before_ @tmp2 */
++				list_add_tail(&lck->lk_nodes[dep].ln_minor_list,
++					&tmp2->lk_nodes[dep].ln_minor_list);
++				goto out_grant_minor;
++			}
++		}
++
++		list_add_tail(&lck->lk_nodes[dep].ln_minor_list, &list);
++	}
++
++ out_grant_minor:
++	if (list.next == &lck->lk_nodes[dep].ln_minor_list) {
++		/* new lock @lck is the first one on minor_key list, which
++		 * means it has the smallest minor_key and it should
++		 * replace @tmp as minor_key owner */
++		list_replace_init(&tmp->lk_nodes[dep].ln_major_list,
++				  &lck->lk_nodes[dep].ln_major_list);
++	}
++	/* remove the temporary head */
++	list_del(&list);
++
++ out_grant_major:
++	ln_grant_inc(dep, lck->lk_nodes[dep].ln_mode);
++	return 1; /* granted with holding lh_lock */
++
++ out_enqueue:
++	list_del(&list); /* remove the temporary head */
++	return htree_node_lock_enqueue(lck, tmp2, dep, wait, event);
++}
++
++/*
++ * release the key of @lck at level @dep, and grant any blocked locks.
++ * the caller will still listen on @key if @event is not NULL, which means
++ * the caller can see an event (via event_cb) while any other lock with the
++ * same key at level @dep is being granted.
++ * NB: ALWAYS called holding lhead::lh_lock
++ * NB: listener will not block anyone because listening mode is HTREE_LOCK_NL
++ */
++static void
++htree_node_unlock_internal(struct htree_lock_head *lhead,
++			   struct htree_lock *curlk, unsigned dep, void *event)
++{
++	struct htree_lock_node	*curln = &curlk->lk_nodes[dep];
++	struct htree_lock	*grtlk = NULL;
++	struct htree_lock_node	*grtln;
++	struct htree_lock	*poslk;
++	struct htree_lock	*tmplk;
++
++	if (!htree_node_is_granted(curlk, dep))
++		return;
++
++	if (!list_empty(&curln->ln_granted_list)) {
++		/* there is another granted lock */
++		grtlk = list_entry(curln->ln_granted_list.next,
++				   struct htree_lock,
++				   lk_nodes[dep].ln_granted_list);
++		list_del_init(&curln->ln_granted_list);
++	}
++
++	if (grtlk == NULL && !list_empty(&curln->ln_blocked_list)) {
++		/*
++		 * @curlk is the only granted lock, so we confirmed:
++		 * a) curln is key owner (attached on major/minor_list),
++		 *    so if there is any blocked lock, it should be attached
++		 *    on curln->ln_blocked_list
++		 * b) we always can grant the first blocked lock
++		 */
++		grtlk = list_entry(curln->ln_blocked_list.next,
++				   struct htree_lock,
++				   lk_nodes[dep].ln_blocked_list);
++		BUG_ON(grtlk->lk_task == NULL);
++		wake_up_process(grtlk->lk_task);
++	}
++
++	if (event != NULL &&
++	    lhead->lh_children[dep].lc_events != HTREE_EVENT_DISABLE) {
++		curln->ln_ev_target = event;
++		curln->ln_mode = HTREE_LOCK_NL; /* listen! */
++	} else {
++		curln->ln_mode = HTREE_LOCK_INVAL;
++	}
++
++	if (grtlk == NULL) { /* I must be the only one locking this key */
++		struct htree_lock_node *tmpln;
++
++		BUG_ON(htree_key_list_empty(curln));
++
++		if (curln->ln_mode == HTREE_LOCK_NL) /* listening */
++			return;
++
++		/* not listening */
++		if (list_empty(&curln->ln_alive_list)) { /* no more listener */
++			htree_key_list_del_init(curln);
++			return;
++		}
++
++		tmpln = list_entry(curln->ln_alive_list.next,
++				   struct htree_lock_node, ln_alive_list);
++
++		BUG_ON(tmpln->ln_mode != HTREE_LOCK_NL);
++
++		htree_key_list_replace_init(curln, tmpln);
++		list_del_init(&curln->ln_alive_list);
++
++		return;
++	}
++
++	/* have a granted lock */
++	grtln = &grtlk->lk_nodes[dep];
++	if (!list_empty(&curln->ln_blocked_list)) {
++		/* only key owner can be on both lists */
++		BUG_ON(htree_key_list_empty(curln));
++
++		if (list_empty(&grtln->ln_blocked_list)) {
++			list_add(&grtln->ln_blocked_list,
++				 &curln->ln_blocked_list);
++		}
++		list_del_init(&curln->ln_blocked_list);
++	}
++	/*
++	 * NB: this is the tricky part:
++	 * We have only two modes for child-lock (PR and PW), also,
++	 * only owner of the key (attached on major/minor_list) can be on
++	 * both blocked_list and granted_list, so @grtlk must be one
++	 * of these two cases:
++	 *
++	 * a) @grtlk is taken from granted_list, which means we've granted
++	 *    more than one lock so @grtlk has to be PR, the first blocked
++	 *    lock must be PW and we can't grant it at all.
++	 *    So even @grtlk is not owner of the key (empty blocked_list),
++	 *    we don't care because we can't grant any lock.
++	 * b) we just grant a new lock which is taken from head of blocked
++	 *    list, and it should be the first granted lock, and it should
++	 *    be the first one linked on blocked_list.
++	 *
++	 * Either way, we can get correct result by iterating blocked_list
++	 * of @grtlk, and don't have to bother on how to find out
++	 * owner of current key.
++	 */
++	list_for_each_entry_safe(poslk, tmplk, &grtln->ln_blocked_list,
++				 lk_nodes[dep].ln_blocked_list) {
++		if (grtlk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW ||
++		    poslk->lk_nodes[dep].ln_mode == HTREE_LOCK_PW)
++			break;
++		/* grant all readers */
++		list_del_init(&poslk->lk_nodes[dep].ln_blocked_list);
++		list_add(&poslk->lk_nodes[dep].ln_granted_list,
++			 &grtln->ln_granted_list);
++
++		BUG_ON(poslk->lk_task == NULL);
++		wake_up_process(poslk->lk_task);
++	}
++
++	/* if @curln is the owner of this key, replace it with @grtln */
++	if (!htree_key_list_empty(curln))
++		htree_key_list_replace_init(curln, grtln);
++
++	if (curln->ln_mode == HTREE_LOCK_INVAL)
++		list_del_init(&curln->ln_alive_list);
++}
++
++/*
++ * a wrapper of htree_node_lock_internal; it returns 1 when the lock is
++ * granted, and 0 only if @wait is false and it can't be granted immediately
++ */
++int
++htree_node_lock_try(struct htree_lock *lck, htree_lock_mode_t mode,
++		    u32 key, unsigned dep, int wait, void *event)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++	int rc;
++
++	BUG_ON(dep >= lck->lk_depth);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++	htree_spin_lock(lhead, dep);
++	rc = htree_node_lock_internal(lhead, lck, mode, key, dep, wait, event);
++	if (rc != 0)
++		htree_spin_unlock(lhead, dep);
++	return rc >= 0;
++}
++EXPORT_SYMBOL(htree_node_lock_try);
++
++/* a wrapper of htree_node_unlock_internal */
++void
++htree_node_unlock(struct htree_lock *lck, unsigned dep, void *event)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++
++	BUG_ON(dep >= lck->lk_depth);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++	htree_spin_lock(lhead, dep);
++	htree_node_unlock_internal(lhead, lck, dep, event);
++	htree_spin_unlock(lhead, dep);
++}
++EXPORT_SYMBOL(htree_node_unlock);
++
++/* stop listening on child-lock level @dep */
++void
++htree_node_stop_listen(struct htree_lock *lck, unsigned dep)
++{
++	struct htree_lock_node *ln = &lck->lk_nodes[dep];
++	struct htree_lock_node *tmp;
++
++	BUG_ON(htree_node_is_granted(lck, dep));
++	BUG_ON(!list_empty(&ln->ln_blocked_list));
++	BUG_ON(!list_empty(&ln->ln_granted_list));
++
++	if (!htree_node_is_listening(lck, dep))
++		return;
++
++	htree_spin_lock(lck->lk_head, dep);
++	ln->ln_mode = HTREE_LOCK_INVAL;
++	ln->ln_ev_target = NULL;
++
++	if (htree_key_list_empty(ln)) { /* not owner */
++		list_del_init(&ln->ln_alive_list);
++		goto out;
++	}
++
++	/* I'm the owner... */
++	if (list_empty(&ln->ln_alive_list)) { /* no more listener */
++		htree_key_list_del_init(ln);
++		goto out;
++	}
++
++	tmp = list_entry(ln->ln_alive_list.next,
++			 struct htree_lock_node, ln_alive_list);
++
++	BUG_ON(tmp->ln_mode != HTREE_LOCK_NL);
++	htree_key_list_replace_init(ln, tmp);
++	list_del_init(&ln->ln_alive_list);
++ out:
++	htree_spin_unlock(lck->lk_head, dep);
++}
++EXPORT_SYMBOL(htree_node_stop_listen);
++
++/* release all child-locks if we have any */
++static void
++htree_node_release_all(struct htree_lock *lck)
++{
++	int	i;
++
++	for (i = 0; i < lck->lk_depth; i++) {
++		if (htree_node_is_granted(lck, i))
++			htree_node_unlock(lck, i, NULL);
++		else if (htree_node_is_listening(lck, i))
++			htree_node_stop_listen(lck, i);
++	}
++}
++
++/*
++ * obtain htree lock, it could be blocked inside if there's conflict
++ * with any granted or blocked lock and @wait is true.
++ * NB: ALWAYS called holding lhead::lh_lock
++ */
++static int
++htree_lock_internal(struct htree_lock *lck, int wait)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++	int	granted = 0;
++	int	blocked = 0;
++	int	i;
++
++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
++		if (lhead->lh_ngranted[i] != 0)
++			granted |= 1 << i;
++		if (lhead->lh_nblocked[i] != 0)
++			blocked |= 1 << i;
++	}
++	if ((htree_lock_compat[lck->lk_mode] & granted) != granted ||
++	    (htree_lock_compat[lck->lk_mode] & blocked) != blocked) {
++		/* block the current lock even if it only conflicts with another
++		 * blocked lock, so that locks like EX won't starve */
++		if (!wait)
++			return -1;
++		lhead->lh_nblocked[lck->lk_mode]++;
++		lk_block_inc(lck->lk_mode);
++
++		lck->lk_task = current;
++		list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
++
++		set_current_state(TASK_UNINTERRUPTIBLE);
++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++		/* wait to be given the lock */
++		if (lck->lk_task != NULL)
++			schedule();
++		/* granted, no doubt. wake up will set me RUNNING */
++		return 0; /* without lh_lock */
++	}
++	lhead->lh_ngranted[lck->lk_mode]++;
++	lk_grant_inc(lck->lk_mode);
++	return 1;
++}
++
++/* release htree lock. NB: ALWAYS called holding lhead::lh_lock */
++static void
++htree_unlock_internal(struct htree_lock *lck)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++	struct htree_lock *tmp;
++	struct htree_lock *tmp2;
++	int granted = 0;
++	int i;
++
++	BUG_ON(lhead->lh_ngranted[lck->lk_mode] == 0);
++
++	lhead->lh_ngranted[lck->lk_mode]--;
++	lck->lk_mode = HTREE_LOCK_INVAL;
++
++	for (i = 0; i < HTREE_LOCK_MAX; i++) {
++		if (lhead->lh_ngranted[i] != 0)
++			granted |= 1 << i;
++	}
++	list_for_each_entry_safe(tmp, tmp2,
++				 &lhead->lh_blocked_list, lk_blocked_list) {
++		/* conflict with any granted lock? */
++		if ((htree_lock_compat[tmp->lk_mode] & granted) != granted)
++			break;
++
++		list_del_init(&tmp->lk_blocked_list);
++
++		BUG_ON(lhead->lh_nblocked[tmp->lk_mode] == 0);
++
++		lhead->lh_nblocked[tmp->lk_mode]--;
++		lhead->lh_ngranted[tmp->lk_mode]++;
++		granted |= 1 << tmp->lk_mode;
++
++		BUG_ON(tmp->lk_task == NULL);
++		wake_up_process(tmp->lk_task);
++	}
++}
++
++/* a wrapper of htree_lock_internal and the exported interface.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_lock_try(struct htree_lock *lck, struct htree_lock_head *lhead,
++	       htree_lock_mode_t mode, int wait)
++{
++	int	rc;
++
++	BUG_ON(lck->lk_depth > lhead->lh_depth);
++	BUG_ON(lck->lk_head != NULL);
++	BUG_ON(lck->lk_task != NULL);
++
++	lck->lk_head = lhead;
++	lck->lk_mode = mode;
++
++	htree_spin_lock(lhead, HTREE_DEP_ROOT);
++	rc = htree_lock_internal(lck, wait);
++	if (rc != 0)
++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++	return rc >= 0;
++}
++EXPORT_SYMBOL(htree_lock_try);
++
++/* a wrapper of htree_unlock_internal and the exported interface.
++ * It releases all htree_node_locks as well as the htree_lock itself */
++void
++htree_unlock(struct htree_lock *lck)
++{
++	BUG_ON(lck->lk_head == NULL);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++
++	htree_node_release_all(lck);
++
++	htree_spin_lock(lck->lk_head, HTREE_DEP_ROOT);
++	htree_unlock_internal(lck);
++	htree_spin_unlock(lck->lk_head, HTREE_DEP_ROOT);
++	lck->lk_head = NULL;
++	lck->lk_task = NULL;
++}
++EXPORT_SYMBOL(htree_unlock);
++
++/* change lock mode */
++void
++htree_change_mode(struct htree_lock *lck, htree_lock_mode_t mode)
++{
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL);
++	lck->lk_mode = mode;
++}
++EXPORT_SYMBOL(htree_change_mode);
++
++/* release the htree lock, then lock it again with a new mode.
++ * This function first releases all htree_node_locks and the htree_lock,
++ * then tries to re-acquire the htree_lock with the new @mode.
++ * It always returns 1 with the lock granted if @wait is true; it can return 0
++ * if @wait is false and the locking request can't be granted immediately */
++int
++htree_change_lock_try(struct htree_lock *lck, htree_lock_mode_t mode, int wait)
++{
++	struct htree_lock_head *lhead = lck->lk_head;
++	int rc;
++
++	BUG_ON(lhead == NULL);
++	BUG_ON(lck->lk_mode == mode);
++	BUG_ON(lck->lk_mode == HTREE_LOCK_INVAL || mode == HTREE_LOCK_INVAL);
++
++	htree_node_release_all(lck);
++
++	htree_spin_lock(lhead, HTREE_DEP_ROOT);
++	htree_unlock_internal(lck);
++	lck->lk_mode = mode;
++	rc = htree_lock_internal(lck, wait);
++	if (rc != 0)
++		htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++	return rc >= 0;
++}
++EXPORT_SYMBOL(htree_change_lock_try);
++
++/* create a htree_lock head with @depth levels (number of child-locks);
++ * it is a per-resource structure */
++struct htree_lock_head *
++htree_lock_head_alloc(unsigned depth, unsigned hbits, unsigned priv)
++{
++	struct htree_lock_head *lhead;
++	int  i;
++
++	if (depth > HTREE_LOCK_DEP_MAX) {
++		printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++			depth, HTREE_LOCK_DEP_MAX);
++		return NULL;
++	}
++
++	lhead = kzalloc(offsetof(struct htree_lock_head,
++				 lh_children[depth]) + priv, GFP_NOFS);
++	if (lhead == NULL)
++		return NULL;
++
++	if (hbits < HTREE_HBITS_MIN)
++		lhead->lh_hbits = HTREE_HBITS_MIN;
++	else if (hbits > HTREE_HBITS_MAX)
++		lhead->lh_hbits = HTREE_HBITS_MAX;
++	else
++		lhead->lh_hbits = hbits;
++
++	lhead->lh_lock = 0;
++	lhead->lh_depth = depth;
++	INIT_LIST_HEAD(&lhead->lh_blocked_list);
++	if (priv > 0) {
++		lhead->lh_private = (void *)lhead +
++			offsetof(struct htree_lock_head, lh_children[depth]);
++	}
++
++	for (i = 0; i < depth; i++) {
++		INIT_LIST_HEAD(&lhead->lh_children[i].lc_list);
++		lhead->lh_children[i].lc_events = HTREE_EVENT_DISABLE;
++	}
++	return lhead;
++}
++EXPORT_SYMBOL(htree_lock_head_alloc);
++
++/* free the htree_lock head */
++void
++htree_lock_head_free(struct htree_lock_head *lhead)
++{
++	int     i;
++
++	BUG_ON(!list_empty(&lhead->lh_blocked_list));
++	for (i = 0; i < lhead->lh_depth; i++)
++		BUG_ON(!list_empty(&lhead->lh_children[i].lc_list));
++	kfree(lhead);
++}
++EXPORT_SYMBOL(htree_lock_head_free);
++
++/* register event callback for @events of child-lock at level @dep */
++void
++htree_lock_event_attach(struct htree_lock_head *lhead, unsigned dep,
++			unsigned events, htree_event_cb_t callback)
++{
++	BUG_ON(lhead->lh_depth <= dep);
++	lhead->lh_children[dep].lc_events = events;
++	lhead->lh_children[dep].lc_callback = callback;
++}
++EXPORT_SYMBOL(htree_lock_event_attach);
++
++/* allocate a htree_lock, which is a per-thread structure; @pbytes is extra
++ * space reserved as private data for the caller */
++struct htree_lock *
++htree_lock_alloc(unsigned depth, unsigned pbytes)
++{
++	struct htree_lock *lck;
++	int i = offsetof(struct htree_lock, lk_nodes[depth]);
++
++	if (depth > HTREE_LOCK_DEP_MAX) {
++		printk(KERN_ERR "%d is larger than max htree_lock depth %d\n",
++			depth, HTREE_LOCK_DEP_MAX);
++		return NULL;
++	}
++	lck = kzalloc(i + pbytes, GFP_NOFS);
++	if (lck == NULL)
++		return NULL;
++
++	if (pbytes != 0)
++		lck->lk_private = (void *)lck + i;
++	lck->lk_mode = HTREE_LOCK_INVAL;
++	lck->lk_depth = depth;
++	INIT_LIST_HEAD(&lck->lk_blocked_list);
++
++	for (i = 0; i < depth; i++) {
++		struct htree_lock_node *node = &lck->lk_nodes[i];
++
++		node->ln_mode = HTREE_LOCK_INVAL;
++		INIT_LIST_HEAD(&node->ln_major_list);
++		INIT_LIST_HEAD(&node->ln_minor_list);
++		INIT_LIST_HEAD(&node->ln_alive_list);
++		INIT_LIST_HEAD(&node->ln_blocked_list);
++		INIT_LIST_HEAD(&node->ln_granted_list);
++	}
++
++	return lck;
++}
++EXPORT_SYMBOL(htree_lock_alloc);
++
++/* free htree_lock node */
++void
++htree_lock_free(struct htree_lock *lck)
++{
++	BUG_ON(lck->lk_mode != HTREE_LOCK_INVAL);
++	kfree(lck);
++}
++EXPORT_SYMBOL(htree_lock_free);
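
To make the major/minor key split in htree_node_lock_internal() above
concrete: with the default lh_hbits = 14, mi_bits and ma_bits are both 7,
so a key is first hashed into 14 bits and then cut in two. A worked example
with hypothetical numbers:

	/* assume hash_long(key, 14) == 6844 (0x1abc, 14 significant bits) */
	major = 6844 & ((1U << 7) - 1);	/* == 60, the low 7 bits  */
	minor = 6844 >> 7;		/* == 53, the high 7 bits */

Locks are then sorted on the ordered major-key list first and the per-major
minor-key list second, which is what keeps the list walks short.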
+Index: linux-source-2.6.32/fs/ext4/ext4.h
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/ext4.h	2012-06-28 12:11:34.597665360 +0200
++++ linux-source-2.6.32/fs/ext4/ext4.h	2012-06-28 12:11:51.977663925 +0200
+@@ -28,6 +28,7 @@
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/wait.h>
++#include <linux/htree_lock.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+ #ifdef __KERNEL__
+@@ -1269,6 +1270,7 @@
+ #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
+ #define EXT4_FEATURE_INCOMPAT_EA_INODE		0x0400
+ #define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000
++#define EXT4_FEATURE_INCOMPAT_LARGEDIR		0x4000
+ 
+ #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
+@@ -1279,7 +1281,8 @@
+ 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+ 					 EXT4_FEATURE_INCOMPAT_EA_INODE| \
+ 					 EXT4_FEATURE_INCOMPAT_MMP| \
+-					 EXT4_FEATURE_INCOMPAT_DIRDATA)
++					 EXT4_FEATURE_INCOMPAT_DIRDATA| \
++					 EXT4_FEATURE_INCOMPAT_LARGEDIR)
+ 
+ #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+@@ -1496,6 +1499,76 @@
+  */
+ #define ERR_BAD_DX_DIR	-75000
+ 
++/* htree levels for ext4 */
++#define EXT4_HTREE_LEVEL_COMPAT 2
++#define EXT4_HTREE_LEVEL	3
++
++static inline int
++ext4_dir_htree_level(struct super_block *sb)
++{
++	return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ?
++		EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
++}
++
++/* assume name-hash is protected by upper layer */
++#define EXT4_HTREE_LOCK_HASH	0
++
++enum ext4_pdo_lk_types {
++#if EXT4_HTREE_LOCK_HASH
++	EXT4_LK_HASH,
++#endif
++	EXT4_LK_DX,		/* index block */
++	EXT4_LK_DE,		/* directory entry block */
++	EXT4_LK_SPIN,		/* spinlock */
++	EXT4_LK_MAX,
++};
++
++/* read-only bit */
++#define EXT4_LB_RO(b)		(1 << (b))
++/* read + write, high bits for writer */
++#define EXT4_LB_RW(b)		((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
++
++enum ext4_pdo_lock_bits {
++	/* DX lock bits */
++	EXT4_LB_DX_RO		= EXT4_LB_RO(EXT4_LK_DX),
++	EXT4_LB_DX		= EXT4_LB_RW(EXT4_LK_DX),
++	/* DE lock bits */
++	EXT4_LB_DE_RO		= EXT4_LB_RO(EXT4_LK_DE),
++	EXT4_LB_DE		= EXT4_LB_RW(EXT4_LK_DE),
++	/* DX spinlock bits */
++	EXT4_LB_SPIN_RO		= EXT4_LB_RO(EXT4_LK_SPIN),
++	EXT4_LB_SPIN		= EXT4_LB_RW(EXT4_LK_SPIN),
++	/* accurate searching */
++	EXT4_LB_EXACT		= EXT4_LB_RO(EXT4_LK_MAX << 1),
++};
++
++enum ext4_pdo_lock_opc {
++	/* external */
++	EXT4_HLOCK_READDIR	= (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
++	EXT4_HLOCK_LOOKUP	= (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
++				   EXT4_LB_EXACT),
++	EXT4_HLOCK_DEL		= (EXT4_LB_DE | EXT4_LB_SPIN_RO |
++				   EXT4_LB_EXACT),
++	EXT4_HLOCK_ADD		= (EXT4_LB_DE | EXT4_LB_SPIN_RO),
++
++	/* internal */
++	EXT4_HLOCK_LOOKUP_SAFE	= (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
++				   EXT4_LB_EXACT),
++	EXT4_HLOCK_DEL_SAFE	= (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
++	EXT4_HLOCK_SPLIT	= (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
++};
++
++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
++#define ext4_htree_lock_head_free(lhead)	htree_lock_head_free(lhead)
++
++extern struct htree_lock *ext4_htree_lock_alloc(void);
++#define ext4_htree_lock_free(lck)		htree_lock_free(lck)
++
++extern void ext4_htree_lock(struct htree_lock *lck,
++			    struct htree_lock_head *lhead,
++			    struct inode *dir, unsigned flags);
++#define ext4_htree_unlock(lck)                  htree_unlock(lck)
++
+ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
+ 			ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
+ 
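
The bit layout above is easier to follow with concrete values. With
EXT4_HTREE_LOCK_HASH defined to 0, the lock types are EXT4_LK_DX = 0,
EXT4_LK_DE = 1, EXT4_LK_SPIN = 2 and EXT4_LK_MAX = 3, so the macros expand
to (a worked sketch; the numbers follow directly from the definitions):

	EXT4_LB_DX_RO   = 1 << 0          = 0x01
	EXT4_LB_DE_RO   = 1 << 1          = 0x02
	EXT4_LB_SPIN_RO = 1 << 2          = 0x04
	EXT4_LB_DX      = 0x01 | (1 << 3) = 0x09  /* reader bit + writer bit */
	EXT4_LB_DE      = 0x02 | (1 << 4) = 0x12
	EXT4_LB_SPIN    = 0x04 | (1 << 5) = 0x24
	EXT4_LB_EXACT   = 1 << 6          = 0x40

so, for example, EXT4_HLOCK_LOOKUP = 0x02 | 0x04 | 0x40 = 0x46: read access
to a directory-entry block and its spinlock, with exact (collision-aware)
searching.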
+@@ -1735,14 +1808,16 @@
+ extern struct inode *ext4_create_inode(handle_t *handle,
+ 				       struct inode * dir, int mode);
+ extern int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-			  struct inode *inode);
++			  struct inode *inode, struct htree_lock *lck);
+ extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
+ 			     struct ext4_dir_entry_2 * de_del,
+ 			     struct buffer_head * bh);
+ extern struct buffer_head * ext4_find_entry(struct inode *dir,
+ 					    const struct qstr *d_name,
+-					    struct ext4_dir_entry_2 ** res_dir);
+-#define ll_ext4_find_entry(inode, dentry, res_dir) ext4_find_entry(inode, &(dentry)->d_name, res_dir)
++					    struct ext4_dir_entry_2 **res_dir,
++					    struct htree_lock *lck);
++#define ll_ext4_find_entry(inode, dentry, res_dir, lck) \
++	ext4_find_entry(inode, &(dentry)->d_name, res_dir, lck)
+ extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
+ 			       struct inode *inode, const void *, const void *);
+ extern struct buffer_head *ext4_append(handle_t *handle,
+@@ -1852,13 +1927,15 @@
+ 	es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
+ }
+ 
+-static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
++static inline loff_t ext4_isize(struct super_block *sb,
++				struct ext4_inode *raw_inode)
+ {
+-	if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) ||
++	    S_ISREG(le16_to_cpu(raw_inode->i_mode)))
+ 		return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+ 			le32_to_cpu(raw_inode->i_size_lo);
+-	else
+-		return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
++
++	return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
+ }
+ 
+ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
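
Putting the wrappers together, a hypothetical caller (not code from this
patchset; @dir is assumed to be the directory inode being operated on)
would use them roughly like this:

	struct htree_lock_head *lhead;	/* one per directory */
	struct htree_lock *lck;		/* one per thread */

	lhead = ext4_htree_lock_head_alloc(HTREE_HBITS_DEF);
	lck = ext4_htree_lock_alloc();

	ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
	/* ... e.g. ext4_find_entry(dir, d_name, &de, lck) ... */
	ext4_htree_unlock(lck);

	ext4_htree_lock_free(lck);
	ext4_htree_lock_head_free(lhead);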
+Index: linux-source-2.6.32/fs/ext4/namei.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/namei.c	2012-06-28 12:11:31.057668333 +0200
++++ linux-source-2.6.32/fs/ext4/namei.c	2012-06-28 12:11:51.981664959 +0200
+@@ -176,7 +176,7 @@
+ 				 struct inode *dir,
+ 				 struct dx_hash_info *hinfo,
+ 				 struct dx_frame *frame,
+-				 int *err);
++				 struct htree_lock *lck, int *err);
+ static void dx_release(struct dx_frame *frames);
+ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
+ 		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+@@ -189,13 +189,13 @@
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+ 				 struct dx_frame *frame,
+ 				 struct dx_frame *frames,
+-				 __u32 *start_hash);
++				 __u32 *start_hash, struct htree_lock *lck);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+ 		const struct qstr *d_name,
+ 		struct ext4_dir_entry_2 **res_dir,
+-		int *err);
++		struct htree_lock *lck, int *err);
+ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+-			     struct inode *inode);
++			     struct inode *inode, struct htree_lock *lck);
+ 
+ unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
+ {
+@@ -249,7 +249,7 @@
+ 
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+-	return le32_to_cpu(entry->block) & 0x00ffffff;
++	return le32_to_cpu(entry->block) & 0x0fffffff;
+ }
+ 
+ static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
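
The widened mask goes hand in hand with the third htree level added by this
patch. Each dx_entry is 8 bytes, so with 4 KiB blocks (ignoring the small
per-block headers, so the numbers are approximate):

	entries per index block ~ 4096 / 8 = 512
	2 levels: 512^2 =     262,144 leaf blocks  (fits in 24 bits)
	3 levels: 512^3 = 134,217,728 leaf blocks  (needs 28 bits)

hence the leaf block number now uses the low 28 bits (0x0fffffff), while the
top nibble of the on-disk field stays reserved.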
+@@ -392,6 +392,223 @@
+ }
+ #endif /* DX_DEBUG */
+ 
++/* private data for htree_lock */
++struct ext4_dir_lock_data {
++	unsigned		ld_flags;  /* bits-map for lock types */
++	unsigned		ld_count;  /* # entries of the last DX block */
++	struct dx_entry		ld_at_entry; /* copy of leaf dx_entry */
++	struct dx_entry		*ld_at;	   /* position of leaf dx_entry */
++};
++
++#define ext4_htree_lock_data(l)	((struct ext4_dir_lock_data *)(l)->lk_private)
++
++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
++#define EXT4_HTREE_NODE_CHANGED	(0xcafeULL << 32)
++
++static void ext4_htree_event_cb(void *target, void *event)
++{
++	u64 *block = (u64 *)target;
++
++	if (*block == dx_get_block((struct dx_entry *)event))
++		*block = EXT4_HTREE_NODE_CHANGED;
++}
++
++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
++{
++	struct htree_lock_head *lhead;
++
++	lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
++	if (lhead != NULL) {
++		htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
++					ext4_htree_event_cb);
++	}
++	return lhead;
++}
++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
++
++struct htree_lock *ext4_htree_lock_alloc(void)
++{
++	return htree_lock_alloc(EXT4_LK_MAX,
++				sizeof(struct ext4_dir_lock_data));
++}
++EXPORT_SYMBOL(ext4_htree_lock_alloc);
++
++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
++{
++	switch (flags) {
++	default: /* 0 or unknown flags require EX lock */
++		return HTREE_LOCK_EX;
++	case EXT4_HLOCK_READDIR:
++		return HTREE_LOCK_PR;
++	case EXT4_HLOCK_LOOKUP:
++		return HTREE_LOCK_CR;
++	case EXT4_HLOCK_DEL:
++	case EXT4_HLOCK_ADD:
++		return HTREE_LOCK_CW;
++	}
++}
++
++/* return PR for read-only operations, otherwise return EX */
++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
++{
++	int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
++
++	/* 0 requires EX lock */
++	return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
++}
++
++static int ext4_htree_safe_locked(struct htree_lock *lck)
++{
++	int writer;
++
++	if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
++		return 1;
++
++	writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
++		 EXT4_LB_DE;
++	if (writer) /* all readers & writers are excluded? */
++		return lck->lk_mode == HTREE_LOCK_EX;
++
++	/* all writers are excluded? */
++	return lck->lk_mode == HTREE_LOCK_PR ||
++	       lck->lk_mode == HTREE_LOCK_PW ||
++	       lck->lk_mode == HTREE_LOCK_EX;
++}
++
++/* relock htree_lock with EX mode if it's a change operation, otherwise
++ * relock it with PR mode. It's a noop if PDO is disabled. */
++static void ext4_htree_safe_relock(struct htree_lock *lck)
++{
++	if (!ext4_htree_safe_locked(lck)) {
++		unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
++
++		htree_change_lock(lck, ext4_htree_safe_mode(flags));
++	}
++}
++
++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
++		     struct inode *dir, unsigned flags)
++{
++	htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
++					      ext4_htree_safe_mode(flags);
++
++	ext4_htree_lock_data(lck)->ld_flags = flags;
++	htree_lock(lck, lhead, mode);
++	if (!is_dx(dir))
++		ext4_htree_safe_relock(lck); /* make sure it's safely locked */
++}
++EXPORT_SYMBOL(ext4_htree_lock);
++
++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
++				unsigned lmask, int wait, void *ev)
++{
++	u32	key = (at == NULL) ? 0 : dx_get_block(at);
++	u32	mode;
++
++	/* NOOP if htree is well protected or caller doesn't require the lock */
++	if (ext4_htree_safe_locked(lck) ||
++	   !(ext4_htree_lock_data(lck)->ld_flags & lmask))
++		return 1;
++
++	mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
++		HTREE_LOCK_PW : HTREE_LOCK_PR;
++	while (1) {
++		if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
++			return 1;
++		if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
++			return 0;
++		cpu_relax(); /* spin until granted */
++	}
++}
++
++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
++{
++	return ext4_htree_safe_locked(lck) ||
++	       htree_node_is_granted(lck, ffz(~lmask));
++}
++
++static void ext4_htree_node_unlock(struct htree_lock *lck,
++				   unsigned lmask, void *buf)
++{
++	/* NB: it's safe to call this multiple times, even if it's not locked */
++	if (!ext4_htree_safe_locked(lck) &&
++	     htree_node_is_granted(lck, ffz(~lmask)))
++		htree_node_unlock(lck, ffz(~lmask), buf);
++}
++
++#define ext4_htree_dx_lock(lck, key)		\
++	ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
++#define ext4_htree_dx_lock_try(lck, key)	\
++	ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
++#define ext4_htree_dx_unlock(lck)		\
++	ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
++#define ext4_htree_dx_locked(lck)		\
++	ext4_htree_node_locked(lck, EXT4_LB_DX)
++
++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
++{
++	struct ext4_dir_lock_data *ld;
++
++	if (ext4_htree_safe_locked(lck))
++		return;
++
++	ld = ext4_htree_lock_data(lck);
++	switch (ld->ld_flags) {
++	default:
++		return;
++	case EXT4_HLOCK_LOOKUP:
++		ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
++		return;
++	case EXT4_HLOCK_DEL:
++		ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
++		return;
++	case EXT4_HLOCK_ADD:
++		ld->ld_flags = EXT4_HLOCK_SPLIT;
++		return;
++	}
++}
++
++#define ext4_htree_de_lock(lck, key)		\
++	ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
++#define ext4_htree_de_unlock(lck)		\
++	ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
++
++#define ext4_htree_spin_lock(lck, key, event)	\
++	ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
++#define ext4_htree_spin_unlock(lck)		\
++	ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
++#define ext4_htree_spin_unlock_listen(lck, p)	\
++	ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
++
++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
++{
++	if (!ext4_htree_safe_locked(lck) &&
++	    htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
++		htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
++}
++
++enum {
++	DX_HASH_COL_IGNORE,	/* ignore collision while probing frames */
++	DX_HASH_COL_YES,	/* there is a collision and it matters */
++	DX_HASH_COL_NO,		/* there is no collision */
++};
++
++static int dx_probe_hash_collision(struct htree_lock *lck,
++				   struct dx_entry *entries,
++				   struct dx_entry *at, u32 hash)
++{
++	if (!(ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
++		return DX_HASH_COL_IGNORE; /* don't care about collision */
++
++	} else if (at == entries + dx_get_count(entries) - 1) {
++		return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
++
++	} else { /* hash collision? */
++		return ((dx_get_hash(at + 1) & ~1) == hash) ?
++			DX_HASH_COL_YES : DX_HASH_COL_NO;
++	}
++}
++
+ /*
+  * Probe for a directory leaf block to search.
+  *
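
A note on the ffz(~lmask) idiom used by the helpers above: ~lmask has its
lowest zero bit exactly where lmask has its lowest set bit, so ffz(~lmask)
recovers the lock level from a lock-bit mask. For example (values as derived
for the ext4.h definitions earlier):

	EXT4_LB_DE       = 0x12 = 0b010010
	~EXT4_LB_DE      = ...101101	/* lowest zero bit is bit 1 */
	ffz(~EXT4_LB_DE) = 1 = EXT4_LK_DE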
+@@ -403,16 +620,17 @@
+  */
+ static struct dx_frame *
+ dx_probe(const struct qstr *d_name, struct inode *dir,
+-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
++	 struct dx_hash_info *hinfo, struct dx_frame *frame_in,
++	 struct htree_lock *lck, int *err)
+ {
+ 	unsigned count, indirect;
+-	struct dx_entry *at, *entries, *p, *q, *m;
++	struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
+ 	struct dx_root_info * info;
+ 	struct buffer_head *bh;
+ 	struct dx_frame *frame = frame_in;
+ 	u32 hash;
+ 
+-	frame->bh = NULL;
++	memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
+ 	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
+ 		goto fail;
+ 	info = dx_get_dx_info((struct ext4_dir_entry_2*)bh->b_data);
+@@ -443,10 +661,16 @@
+ 		goto fail;
+ 	}
+ 
+-	if ((indirect = info->indirect_levels) > 1) {
++	indirect = info->indirect_levels;
++	if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
+ 		ext4_warning(dir->i_sb, __func__,
+-			     "Unimplemented inode hash depth: %#06x",
+-			     info->indirect_levels);
++		"Directory (ino: %lu) htree depth %#06x exceed "
++		"supported value", dir->i_ino,
++		ext4_dir_htree_level(dir->i_sb));
++		if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
++			ext4_warning(dir->i_sb, __func__, "Enable large directory "
++			"feature to access it");
++		}
+ 		brelse(bh);
+ 		*err = ERR_BAD_DX_DIR;
+ 		goto fail;
+@@ -467,8 +691,15 @@
+ 	dxtrace(printk("Look up %x", hash));
+ 	while (1)
+ 	{
++		if (indirect == 0) { /* the last index level */
++			/* NB: ext4_htree_dx_lock() could be a noop if the
++			 * DX-lock flag is not set for the current operation */
++			ext4_htree_dx_lock(lck, dx);
++			ext4_htree_spin_lock(lck, dx, NULL);
++		}
+ 		count = dx_get_count(entries);
+-		if (!count || count > dx_get_limit(entries)) {
++		if (count == 0 || count > dx_get_limit(entries)) {
++			ext4_htree_spin_unlock(lck); /* release spin */
+ 			ext4_warning(dir->i_sb, __func__,
+ 				     "dx entry: no count or count > limit");
+ 			brelse(bh);
+@@ -509,9 +740,73 @@
+ 		frame->bh = bh;
+ 		frame->entries = entries;
+ 		frame->at = at;
+-		if (!indirect--) return frame;
++
++		if (indirect == 0) { /* the last index level */
++			struct ext4_dir_lock_data *ld;
++			u64 myblock;
++
++			/* By default we only lock the DE-block; however, we
++			 * also lock the last-level DX-block if:
++			 * a) there is a hash collision:
++			 *    we set the DX-lock flag (a few lines below)
++			 *    and retry to lock the DX-block; see details
++			 *    in dx_probe_hash_collision()
++			 * b) it's a retry after a split:
++			 *    we need to lock the last-level DX-block so nobody
++			 *    else can split any leaf blocks under the same
++			 *    DX-block; see details in ext4_dx_add_entry()
++			 */
++			if (ext4_htree_dx_locked(lck)) {
++				/* DX-block is locked, just lock DE-block
++				 * and return */
++				ext4_htree_spin_unlock(lck);
++				if (!ext4_htree_safe_locked(lck))
++					ext4_htree_de_lock(lck, frame->at);
++				return frame;
++			}
++			/* it's pdirop and no DX lock */
++			if (dx_probe_hash_collision(lck, entries, at, hash) ==
++			    DX_HASH_COL_YES) {
++				/* found a hash collision; set the DX-lock flag
++				 * and retry to obtain the DX-lock */
++				ext4_htree_spin_unlock(lck);
++				ext4_htree_dx_need_lock(lck);
++				continue;
++			}
++			ld = ext4_htree_lock_data(lck);
++			/* because I don't hold the DX lock, @at can't be trusted
++			 * after I release the spinlock, so I have to save it */
++			ld->ld_at = at;
++			ld->ld_at_entry = *at;
++			ld->ld_count = dx_get_count(entries);
++
++			frame->at = &ld->ld_at_entry;
++			myblock = dx_get_block(at);
++
++			/* NB: lock ordering matters here */
++			ext4_htree_spin_unlock_listen(lck, &myblock);
++			/* another thread can split this DE-block because:
++			 * a) I don't have the lock for the DE-block yet
++			 * b) I released the spinlock on the DX-block
++			 * if that happens I can detect it by listening for
++			 * the split event on this DE-block */
++			ext4_htree_de_lock(lck, frame->at);
++			ext4_htree_spin_stop_listen(lck);
++
++			if (myblock == EXT4_HTREE_NODE_CHANGED) {
++				/* someone split this DE-block before
++				 * I locked it; I need to retry and lock
++				 * the valid DE-block */
++				ext4_htree_de_unlock(lck);
++				continue;
++			}
++			return frame;
++		}
++		dx = at;
++		indirect--;
+ 		if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
+ 			goto fail2;
++
+ 		at = entries = ((struct dx_node *) bh->b_data)->entries;
+ 		if (dx_get_limit(entries) != dx_node_limit (dir)) {
+ 			ext4_warning(dir->i_sb, __func__,
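
The ordering in the hunk above is the heart of pdirop and is worth restating
as a sketch (condensed from the code, not additional patch content):

	myblock = dx_get_block(at);		      /* remember the leaf */
	ext4_htree_spin_unlock_listen(lck, &myblock); /* drop spin, keep listening */
	ext4_htree_de_lock(lck, frame->at);	      /* may sleep */
	ext4_htree_spin_stop_listen(lck);
	if (myblock == EXT4_HTREE_NODE_CHANGED) {
		/* a splitter fired the event: unlock the DE-block
		 * and re-probe from the top */
	}

so a concurrent split between dropping the DX spinlock and acquiring the
DE-block lock is never missed, only retried.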
+@@ -539,13 +834,18 @@
+ static void dx_release (struct dx_frame *frames)
+ {
+ 	struct dx_root_info *info;
++	int i;
++
+ 	if (frames[0].bh == NULL)
+ 		return;
+ 
+ 	info = dx_get_dx_info((struct ext4_dir_entry_2*)frames[0].bh->b_data);
+-	if (info->indirect_levels)
+-		brelse(frames[1].bh);
+-	brelse(frames[0].bh);
++	for (i = 0; i <= info->indirect_levels; i++) {
++		if (frames[i].bh == NULL)
++			break;
++		brelse(frames[i].bh);
++		frames[i].bh = NULL;
++	}
+ }
+ 
+ /*
+@@ -568,7 +868,7 @@
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+ 				 struct dx_frame *frame,
+ 				 struct dx_frame *frames,
+-				 __u32 *start_hash)
++				 __u32 *start_hash, struct htree_lock *lck)
+ {
+ 	struct dx_frame *p;
+ 	struct buffer_head *bh;
+@@ -583,12 +883,22 @@
+ 	 * this loop, num_frames indicates the number of interior
+ 	 * nodes need to be read.
+ 	 */
++	ext4_htree_de_unlock(lck);
+ 	while (1) {
+-		if (++(p->at) < p->entries + dx_get_count(p->entries))
+-			break;
++		if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
++			/* num_frames > 0 :
++			 *   DX block
++			 * ext4_htree_dx_locked:
++			 *   frame->at is a reliable pointer returned by dx_probe;
++			 *   otherwise dx_probe already determined there was no collision */
++			if (++(p->at) < p->entries + dx_get_count(p->entries))
++				break;
++		}
+ 		if (p == frames)
+ 			return 0;
+ 		num_frames++;
++		if (num_frames == 1)
++			ext4_htree_dx_unlock(lck);
+ 		p--;
+ 	}
+ 
+@@ -611,6 +921,13 @@
+ 	 * block so no check is necessary
+ 	 */
+ 	while (num_frames--) {
++		if (num_frames == 0) {
++			/* it's not always necessary, we just don't want to
++			 * detect hash collision again */
++			ext4_htree_dx_need_lock(lck);
++			ext4_htree_dx_lock(lck, p->at);
++		}
++
+ 		if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
+ 				      0, &err)))
+ 			return err; /* Failure */
+@@ -619,6 +936,7 @@
+ 		p->bh = bh;
+ 		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+ 	}
++	ext4_htree_de_lock(lck, p->at);
+ 	return 1;
+ }
+ 
+@@ -688,7 +1006,7 @@
+ {
+ 	struct dx_hash_info hinfo;
+ 	struct ext4_dir_entry_2 *de;
+-	struct dx_frame frames[2], *frame;
++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ 	struct inode *dir;
+ 	ext4_lblk_t block;
+ 	int count = 0;
+@@ -711,10 +1029,10 @@
+ 	}
+ 	hinfo.hash = start_hash;
+ 	hinfo.minor_hash = 0;
+-	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
++	/* the caller is assumed to hold the htree PR-locked */
++	frame = dx_probe(NULL, dir, &hinfo, frames, NULL, &err);
+ 	if (!frame)
+ 		return err;
+-
+ 	/* Add '.' and '..' from the htree header */
+ 	if (!start_hash && !start_minor_hash) {
+ 		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+@@ -741,7 +1059,7 @@
+ 		count += ret;
+ 		hashval = ~0;
+ 		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+-					    frame, frames, &hashval);
++					    frame, frames, &hashval, NULL);
+ 		*next_hash = hashval;
+ 		if (ret < 0) {
+ 			err = ret;
+@@ -841,9 +1159,17 @@
+ 
+ static void ext4_update_dx_flag(struct inode *inode)
+ {
++	/* Disable it for ldiskfs, because going from a DX directory to
++	 * a non-DX directory while it is in use will completely break
++	 * the htree-locking.
++	 * If we really want to support this operation in the future,
++	 * we would need to exclusively lock the directory here, which
++	 * would increase code complexity */
++#if 0
+ 	if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+ 				     EXT4_FEATURE_COMPAT_DIR_INDEX))
+ 		ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++#endif
+ }
+ 
+ /*
+@@ -916,8 +1242,9 @@
+  * to brelse() it when appropriate.
+  */
+ struct buffer_head * ext4_find_entry(struct inode *dir,
+-				      const struct qstr *d_name,
+-				      struct ext4_dir_entry_2 ** res_dir)
++				     const struct qstr *d_name,
++				     struct ext4_dir_entry_2 **res_dir,
++				     struct htree_lock *lck)
+ {
+ 	struct super_block *sb;
+ 	struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -938,7 +1265,7 @@
+ 	if (namelen > EXT4_NAME_LEN)
+ 		return NULL;
+ 	if (is_dx(dir)) {
+-		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
++		bh = ext4_dx_find_entry(dir, d_name, res_dir, lck, &err);
+ 		/*
+ 		 * On success, or if the error was file not found,
+ 		 * return.  Otherwise, fall back to doing a search the
+@@ -948,6 +1275,7 @@
+ 			return bh;
+ 		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+ 			       "falling back\n"));
++		ext4_htree_safe_relock(lck);
+ 	}
+ 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+ 	start = EXT4_I(dir)->i_dir_start_lookup;
+@@ -1026,13 +1354,15 @@
+ }
+ EXPORT_SYMBOL(ext4_find_entry);
+ 
+-static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
+-		       struct ext4_dir_entry_2 **res_dir, int *err)
++static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
++				const struct qstr *d_name,
++				struct ext4_dir_entry_2 **res_dir,
++				struct htree_lock *lck, int *err)
+ {
+ 	struct super_block * sb;
+ 	struct dx_hash_info	hinfo;
+ 	u32 hash;
+-	struct dx_frame frames[2], *frame;
++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ 	struct ext4_dir_entry_2 *de, *top;
+ 	struct buffer_head *bh;
+ 	ext4_lblk_t block;
+@@ -1043,13 +1373,16 @@
+ 	sb = dir->i_sb;
+ 	/* NFS may look up ".." - look at dx_root directory block */
+ 	if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
+-		if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
++		if (!(frame = dx_probe(d_name, dir, &hinfo, frames, lck, err)))
+ 			return NULL;
+ 	} else {
+ 		frame = frames;
+ 		frame->bh = NULL;			/* for dx_release() */
+ 		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
+ 		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
++		/* "." and ".." are stored in root DX lock */
++		ext4_htree_dx_need_lock(lck);
++		ext4_htree_dx_lock(lck, NULL);
+ 	}
+ 	hash = hinfo.hash;
+ 	do {
+@@ -1078,7 +1411,7 @@
+ 		brelse(bh);
+ 		/* Check to see if we should continue to search */
+ 		retval = ext4_htree_next_block(dir, hash, frame,
+-					       frames, NULL);
++					       frames, NULL, lck);
+ 		if (retval < 0) {
+ 			ext4_warning(sb, __func__,
+ 			     "error reading index page in directory #%lu",
+@@ -1104,7 +1437,7 @@
+ 	if (dentry->d_name.len > EXT4_NAME_LEN)
+ 		return ERR_PTR(-ENAMETOOLONG);
+ 
+-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+ 	inode = NULL;
+ 	if (bh) {
+ 		__u32 ino = le32_to_cpu(de->inode);
+@@ -1173,7 +1506,7 @@
+ 	struct ext4_dir_entry_2 * de;
+ 	struct buffer_head *bh;
+ 
+-	bh = ext4_find_entry(child->d_inode, &dotdot, &de);
++	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
+ 	inode = NULL;
+ 	if (!bh)
+ 		return ERR_PTR(-ENOENT);
+@@ -1262,8 +1595,9 @@
+  * Returns pointer to de in block into which the new entry will be inserted.
+  */
+ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+-			struct buffer_head **bh,struct dx_frame *frame,
+-			struct dx_hash_info *hinfo, int *error)
++			struct buffer_head **bh, struct dx_frame *frames,
++			struct dx_frame *frame, struct dx_hash_info *hinfo,
++			struct htree_lock *lck, int *error)
+ {
+ 	unsigned blocksize = dir->i_sb->s_blocksize;
+ 	unsigned count, continued;
+@@ -1320,7 +1654,14 @@
+ 					hash2, split, count-split));
+ 
+ 	/* Fancy dance to stay within two buffers */
+-	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
++	if (hinfo->hash < hash2) {
++		de2 = dx_move_dirents(data1, data2, map + split,
++				      count - split, blocksize);
++	} else {
++		/* make sure we will add the entry to the same block
++		 * which we have already locked */
++		de2 = dx_move_dirents(data1, data2, map, split, blocksize);
++	}
+ 	de = dx_pack_dirents(data1, blocksize);
+ 	de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+ 					   blocksize);
+@@ -1329,13 +1670,21 @@
+ 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
+ 	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+ 
+-	/* Which block gets the new entry? */
+-	if (hinfo->hash >= hash2)
+-	{
+-		swap(*bh, bh2);
+-		de = de2;
++	ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
++			     frame->at); /* notify block is being split */
++	if (hinfo->hash < hash2) {
++		dx_insert_block(frame, hash2 + continued, newblock);
++
++	} else {
++		/* switch block number */
++		dx_insert_block(frame, hash2 + continued,
++				dx_get_block(frame->at));
++		dx_set_block(frame->at, newblock);
++		(frame->at)++;
+ 	}
+-	dx_insert_block(frame, hash2 + continued, newblock);
++	ext4_htree_spin_unlock(lck);
++	ext4_htree_dx_unlock(lck);
++
+ 	err = ext4_handle_dirty_metadata(handle, dir, bh2);
+ 	if (err)
+ 		goto journal_error;
+@@ -1447,7 +1796,7 @@
+ 	if (!IS_NOCMTIME(dir))
+ 		dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ 	ext4_update_dx_flag(dir);
+-	dir->i_version++;
++	inode_inc_iversion(dir);
+ 	ext4_mark_inode_dirty(handle, dir);
+ 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ 	err = ext4_handle_dirty_metadata(handle, dir, bh);
+@@ -1467,7 +1816,7 @@
+ 	const char	*name = dentry->d_name.name;
+ 	int		namelen = dentry->d_name.len;
+ 	struct buffer_head *bh2;
+-	struct dx_frame	frames[2], *frame;
++	struct dx_frame	frames[EXT4_HTREE_LEVEL], *frame;
+ 	struct dx_entry *entries;
+ 	struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+ 	char		*data1, *top;
+@@ -1545,7 +1894,7 @@
+ 	frame->at = entries;
+ 	frame->bh = bh;
+ 	bh = bh2;
+-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
++	de = do_split(handle, dir, &bh, frames, frame, &hinfo, NULL, &retval);
+ 	dx_release (frames);
+ 	if (!(de))
+ 		return retval;
+@@ -1644,7 +1993,7 @@
+  * the entry, as someone else might have used it while you slept.
+  */
+ int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-		   struct inode *inode)
++		   struct inode *inode, struct htree_lock *lck)
+ {
+ 	struct inode *dir = dentry->d_parent->d_inode;
+ 	struct buffer_head *bh;
+@@ -1663,9 +2012,10 @@
+ 		if (dentry->d_name.len == 2 &&
+ 		    memcmp(dentry->d_name.name, "..", 2) == 0)
+ 			return ext4_update_dotdot(handle, dentry, inode);
+-		retval = ext4_dx_add_entry(handle, dentry, inode);
++		retval = ext4_dx_add_entry(handle, dentry, inode, lck);
+ 		if (!retval || (retval != ERR_BAD_DX_DIR))
+ 			return retval;
++		ext4_htree_safe_relock(lck);
+ 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ 		dx_fallback++;
+ 		ext4_mark_inode_dirty(handle, dir);
+@@ -1704,18 +2054,21 @@
+  * Returns 0 for success, or a negative error value
+  */
+ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+-			     struct inode *inode)
++			     struct inode *inode, struct htree_lock *lck)
+ {
+-	struct dx_frame frames[2], *frame;
++	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+ 	struct dx_entry *entries, *at;
+ 	struct dx_hash_info hinfo;
+ 	struct buffer_head *bh;
+ 	struct inode *dir = dentry->d_parent->d_inode;
+ 	struct super_block *sb = dir->i_sb;
+ 	struct ext4_dir_entry_2 *de;
++	int restart;
+ 	int err;
+ 
+-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
++again:
++	restart = 0;
++	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, lck, &err);
+ 	if (!frame)
+ 		return err;
+ 	entries = frame->entries;
+@@ -1729,29 +2082,59 @@
+ 	if (err)
+ 		goto journal_error;
+ 
++	BUFFER_TRACE(bh, "get_write_access");
++	err = ext4_journal_get_write_access(handle, bh);
++	if (err)
++		goto journal_error;
++
+ 	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+ 	if (err != -ENOSPC)
+ 		goto cleanup;
+ 
++	err = 0;
+ 	/* Block full, should compress but for now just split */
+ 	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
+ 		       dx_get_count(entries), dx_get_limit(entries)));
+ 	/* Need to split index? */
+ 	if (dx_get_count(entries) == dx_get_limit(entries)) {
+ 		ext4_lblk_t newblock;
+-		unsigned icount = dx_get_count(entries);
+-		int levels = frame - frames;
++		int levels = frame - frames + 1;
++		unsigned icount;
++		int add_level = 1;
+ 		struct dx_entry *entries2;
+ 		struct dx_node *node2;
+ 		struct buffer_head *bh2;
+ 
+-		if (levels && (dx_get_count(frames->entries) ==
+-			       dx_get_limit(frames->entries))) {
+-			ext4_warning(sb, __func__,
+-				     "Directory index full!");
++		if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
++			ext4_htree_safe_relock(lck);
++			restart = 1;
++			goto cleanup;
++		}
++		while (frame > frames) {
++			if (dx_get_count((frame - 1)->entries) <
++			    dx_get_limit((frame - 1)->entries)) {
++				add_level = 0;
++				break;
++			}
++			frame--; /* split higher index block */
++			at = frame->at;
++			entries = frame->entries;
++			restart = 1;
++		}
++
++		if (add_level && levels == ext4_dir_htree_level(sb)) {
++			ext4_warning(sb, __func__, "Directory (ino: %lu) index full, "
++					 "reached max htree level: %d",
++					 dir->i_ino, levels);
++			if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
++				ext4_warning(sb, __func__, "Large directory feature is "
++						 "not enabled on this "
++						 "filesystem");
++			}
+ 			err = -ENOSPC;
+ 			goto cleanup;
+ 		}
++		icount = dx_get_count(entries);
+ 		bh2 = ext4_append (handle, dir, &newblock, &err);
+ 		if (!(bh2))
+ 			goto cleanup;
+@@ -1764,7 +2147,7 @@
+ 		err = ext4_journal_get_write_access(handle, frame->bh);
+ 		if (err)
+ 			goto journal_error;
+-		if (levels) {
++		if (!add_level) {
+ 			unsigned icount1 = icount/2, icount2 = icount - icount1;
+ 			unsigned hash2 = dx_get_hash(entries + icount1);
+ 			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
+@@ -1772,7 +2155,7 @@
+ 
+ 			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
+ 			err = ext4_journal_get_write_access(handle,
+-							     frames[0].bh);
++							    (frame - 1)->bh);
+ 			if (err)
+ 				goto journal_error;
+ 
+@@ -1788,18 +2171,24 @@
+ 				frame->entries = entries = entries2;
+ 				swap(frame->bh, bh2);
+ 			}
+-			dx_insert_block(frames + 0, hash2, newblock);
+-			dxtrace(dx_show_index("node", frames[1].entries));
++			dx_insert_block((frame - 1), hash2, newblock);
++			dxtrace(dx_show_index("node", frame->entries));
+ 			dxtrace(dx_show_index("node",
+ 			       ((struct dx_node *) bh2->b_data)->entries));
+ 			err = ext4_handle_dirty_metadata(handle, inode, bh2);
+ 			if (err)
+ 				goto journal_error;
+ 			brelse (bh2);
++			ext4_handle_dirty_metadata(handle, inode,
++						   (frame - 1)->bh);
++			if (restart) {
++				ext4_handle_dirty_metadata(handle, inode,
++							   frame->bh);
++				goto cleanup;
++			}
+ 		} else {
+ 			struct dx_root_info * info;
+-			dxtrace(printk(KERN_DEBUG
+-				       "Creating second level index...\n"));
++
+ 			memcpy((char *) entries2, (char *) entries,
+ 			       icount * sizeof(struct dx_entry));
+ 			dx_set_limit(entries2, dx_node_limit(dir));
+@@ -1809,32 +2198,60 @@
+ 			dx_set_block(entries + 0, newblock);
+ 			info = dx_get_dx_info((struct ext4_dir_entry_2*)
+ 					frames[0].bh->b_data);
+-			info->indirect_levels = 1;
++			info->indirect_levels += 1;
++			dxtrace(printk(KERN_DEBUG
++				       "Creating %d level index...\n",
++				       info->indirect_levels));
++			ext4_handle_dirty_metadata(handle, inode, frame->bh);
++			ext4_handle_dirty_metadata(handle, inode, bh2);
++			brelse(bh2);
++			restart = 1;
++			goto cleanup;
++		}
++	} else if (!ext4_htree_dx_locked(lck)) {
++		struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
+ 
+-			/* Add new access path frame */
+-			frame = frames + 1;
+-			frame->at = at = at - entries + entries2;
+-			frame->entries = entries = entries2;
+-			frame->bh = bh2;
+-			err = ext4_journal_get_write_access(handle,
+-							     frame->bh);
+-			if (err)
+-				goto journal_error;
++		/* not well protected, require DX lock */
++		ext4_htree_dx_need_lock(lck);
++		at = frame > frames ? (frame - 1)->at : NULL;
++
++		/* NB: no risk of deadlock because it's just a try-lock.
++		 *
++		 * NB: we check ld_count twice, the first time before
++		 * taking the DX lock, the second time while holding it.
++		 *
++		 * NB: we never free directory blocks so far, which means
++		 * the value returned by dx_get_count() should equal
++		 * ld->ld_count if nobody has split any DE-block under @at,
++		 * and ld->ld_at still points to a valid dx_entry. */
++		if ((ld->ld_count != dx_get_count(entries)) ||
++		    !ext4_htree_dx_lock_try(lck, at) ||
++		    (ld->ld_count != dx_get_count(entries))) {
++			restart = 1;
++			goto cleanup;
+ 		}
+-		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
++		/* OK, I've got DX lock and nothing changed */
++		frame->at = ld->ld_at;
+ 	}
+-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
++	de = do_split(handle, dir, &bh, frames, frame, &hinfo, lck, &err);
+ 	if (!de)
+ 		goto cleanup;
++
+ 	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+ 	goto cleanup;
+ 
+ journal_error:
+ 	ext4_std_error(dir->i_sb, err);
+ cleanup:
++	ext4_htree_dx_unlock(lck);
++	ext4_htree_de_unlock(lck);
+ 	if (bh)
+ 		brelse(bh);
+ 	dx_release(frames);
++	/* @restart being true means the htree-path has changed, so we
++	 * need to repeat dx_probe() to find a valid htree-path */
++	if (restart && err == 0)
++		goto again;
+ 	return err;
+ }
+ 
+@@ -1869,7 +2286,7 @@
+ 					blocksize);
+ 			else
+ 				de->inode = 0;
+-			dir->i_version++;
++			inode_inc_iversion(dir);
+ 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ 			ext4_handle_dirty_metadata(handle, dir, bh);
+ 			return 0;
+@@ -1912,7 +2329,7 @@
+ static int ext4_add_nondir(handle_t *handle,
+ 		struct dentry *dentry, struct inode *inode)
+ {
+-	int err = ext4_add_entry(handle, dentry, inode);
++	int err = ext4_add_entry(handle, dentry, inode, NULL);
+ 	if (!err) {
+ 		ext4_mark_inode_dirty(handle, inode);
+ 		d_instantiate(dentry, inode);
+@@ -2142,7 +2559,7 @@
+ 		goto out_stop;
+ 	}
+ 
+-	err = ext4_add_entry(handle, dentry, inode);
++	err = ext4_add_entry(handle, dentry, inode, NULL);
+ 	if (err) {
+ 		clear_nlink(inode);
+ 		unlock_new_inode(inode);
+@@ -2411,7 +2828,7 @@
+ 		return PTR_ERR(handle);
+ 
+ 	retval = -ENOENT;
+-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+ 	if (!bh)
+ 		goto end_rmdir;
+ 
+@@ -2473,7 +2890,7 @@
+ 		ext4_handle_sync(handle);
+ 
+ 	retval = -ENOENT;
+-	bh = ext4_find_entry(dir, &dentry->d_name, &de);
++	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+ 	if (!bh)
+ 		goto end_unlink;
+ 
+@@ -2597,7 +3014,7 @@
+ 	ext4_inc_count(handle, inode);
+ 	atomic_inc(&inode->i_count);
+ 
+-	err = ext4_add_entry(handle, dentry, inode);
++	err = ext4_add_entry(handle, dentry, inode, NULL);
+ 	if (!err) {
+ 		ext4_mark_inode_dirty(handle, inode);
+ 		d_instantiate(dentry, inode);
+@@ -2642,7 +3059,7 @@
+ 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ 		ext4_handle_sync(handle);
+ 
+-	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
++	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
+ 	/*
+ 	 *  Check for inode number is _not_ due to possible IO errors.
+ 	 *  We might rmdir the source, keep it as pwd of some process
+@@ -2655,7 +3072,7 @@
+ 		goto end_rename;
+ 
+ 	new_inode = new_dentry->d_inode;
+-	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
++	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, NULL);
+ 	if (new_bh) {
+ 		if (!new_inode) {
+ 			brelse(new_bh);
+@@ -2681,7 +3098,7 @@
+ 			goto end_rename;
+ 	}
+ 	if (!new_bh) {
+-		retval = ext4_add_entry(handle, new_dentry, old_inode);
++		retval = ext4_add_entry(handle, new_dentry, old_inode, NULL);
+ 		if (retval)
+ 			goto end_rename;
+ 	} else {
+@@ -2723,7 +3140,8 @@
+ 		struct buffer_head *old_bh2;
+ 		struct ext4_dir_entry_2 *old_de2;
+ 
+-		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
++		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
++					  &old_de2, NULL);
+ 		if (old_bh2) {
+ 			retval = ext4_delete_entry(handle, old_dir,
+ 						   old_de2, old_bh2);
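+
+Note on the namei.c changes above: they thread an optional struct htree_lock
+*lck through the lookup and insert paths. Every in-tree VFS call site passes
+NULL, which keeps the old single-writer behaviour under i_mutex, while an
+external caller (e.g. Lustre's osd-ldiskfs layer) can pass a real lock handle
+to run directory operations in parallel. A minimal caller sketch, assuming the
+htree_lock primitives this patchset adds in htree_lock.c/htree_lock.h
+(htree_lock_alloc(), htree_lock(), htree_unlock(), htree_lock_free() and a
+shared HTREE_LOCK_PR mode -- these names are taken from the rest of the
+patchset and may differ in detail):
+
+/* Illustrative only -- not part of the patch. Assumes the htree_lock
+ * API added elsewhere in this patchset; names may differ in detail. */
+static struct buffer_head *
+pdo_find_entry(struct inode *dir, struct htree_lock_head *lhead,
+	       const struct qstr *name, struct ext4_dir_entry_2 **de)
+{
+	struct htree_lock *lck;
+	struct buffer_head *bh;
+
+	lck = htree_lock_alloc();	/* per-thread lock handle */
+	if (lck == NULL)
+		return NULL;
+	/* PR (shared) mode: concurrent lookups proceed in parallel; the
+	 * finer-grained DX/DE child locks seen in the hunks above are
+	 * taken inside dx_probe() and friends. */
+	htree_lock(lck, lhead, HTREE_LOCK_PR);
+	bh = ext4_find_entry(dir, name, de, lck);
+	htree_unlock(lck);
+	htree_lock_free(lck);
+	return bh;
+}
+
+The EX "safe" relock and the @restart retry loop in ext4_dx_add_entry() above
+exist because a concurrent split can invalidate a probed htree path; a caller
+only ever sees the operation repeat, never a torn index.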
+Index: linux-source-2.6.32/fs/ext4/inode.c
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/inode.c	2012-06-28 12:10:23.333666208 +0200
++++ linux-source-2.6.32/fs/ext4/inode.c	2012-06-28 12:11:51.985662967 +0200
+@@ -4923,7 +4923,7 @@
+ 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
+ 		ei->i_file_acl |=
+ 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+-	inode->i_size = ext4_isize(raw_inode);
++	inode->i_size = ext4_isize(sb, raw_inode);
+ 	ei->i_disksize = inode->i_size;
+ #ifdef CONFIG_QUOTA
+ 	ei->i_reserved_quota = 0;
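+
+Note on the one-line inode.c change: passing the superblock to ext4_isize()
+only makes sense if the helper now gates the high 32 bits of i_size on a
+feature check for directories. A sketch of what the reworked helper presumably
+looks like (the actual definition lives in ext4.h, outside the quoted hunks;
+EXT4_FEATURE_INCOMPAT_LARGEDIR is the assumed flag name):
+
+/* Presumed shape of the new helper -- illustrative, not quoted from
+ * the patch. */
+static inline loff_t ext4_isize(struct super_block *sb,
+				struct ext4_inode *raw_inode)
+{
+	/* Regular files always use the full 64-bit size; directories
+	 * only do so when the large-directory feature is enabled. */
+	if (S_ISREG(le16_to_cpu(raw_inode->i_mode)) ||
+	    (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_LARGEDIR) &&
+	     S_ISDIR(le16_to_cpu(raw_inode->i_mode))))
+		return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+		       le32_to_cpu(raw_inode->i_size_lo);
+	return (loff_t)le32_to_cpu(raw_inode->i_size_lo);
+}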
+Index: linux-source-2.6.32/fs/ext4/Makefile
+===================================================================
+--- linux-source-2.6.32.orig/fs/ext4/Makefile	2012-06-28 12:10:45.425668386 +0200
++++ linux-source-2.6.32/fs/ext4/Makefile	2012-06-28 12:11:51.985662967 +0200
+@@ -7,7 +7,7 @@
+ ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+ 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+ 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
+-		mmp.o dynlocks.o
++		htree_lock.o mmp.o dynlocks.o
+ 
+ ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
+ ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
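
Note on why frames[2] becomes frames[EXT4_HTREE_LEVEL] throughout the patch:
each extra index level multiplies directory capacity by the per-block index
fan-out. A rough standalone estimate (illustrative constants only; the real
limits come from dx_root_limit()/dx_node_limit() and actual name lengths):

#include <stdio.h>

/* Rough htree capacity estimate -- illustrative constants only. */
int main(void)
{
	const unsigned long long blocksize = 4096;
	const unsigned long long fanout = blocksize / 8;  /* ~sizeof(dx_entry) */
	const unsigned long long leaf = blocksize / 40;   /* ~avg dirent size */
	unsigned long long cap = leaf;
	int level;

	for (level = 1; level <= 3; level++) {
		cap *= fanout;
		printf("%d index level(s): ~%llu entries\n", level, cap);
	}
	return 0;
}

With 4KiB blocks this comes out to roughly 5e4, 2.7e7 and 1.3e10 entries for
one, two and three index levels, which matches the warning added above: once
levels == ext4_dir_htree_level(sb), the only way forward is enabling the
large-directory feature.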

-- 
Lustre Debian Packaging 



More information about the Pkg-lustre-svn-commit mailing list