From 62cc643fe490948c06d5bd4e530eec44b0d36ade Mon Sep 17 00:00:00 2001
From: Alexey Lyashkov <alexey.lyashkov@seagate.com>
Date: Fri, 15 Jul 2016 16:04:53 +0300
Subject: [PATCH] MRP-0000 ldiskfs: use new ext4_map_blocks() API

With RHEL 6.5 the old ext4_ext_walk_space() API was deprecated and
removed.  Backport the new ext4_map_blocks() API from upstream ext4
instead of copy-pasting the older, buggy code, since upstream fiemap
now uses the new API in its own code.

(upstream kernel commit 91dd8c114499e9818f2d5919ef0b9eee61810220
 "ext4: prevent race while walking extent tree for fiemap")
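
As a rough caller-side sketch of the API change being backported
(illustrative only; the struct fields and the two prototypes match
the declarations added below, the variable names are assumptions):

	/* old API: a full struct buffer_head on the stack */
	struct buffer_head bh = { 0 };
	ret = ext4_get_blocks(handle, inode, block, max_blocks,
			      &bh, flags);

	/* new API: a 20-byte struct ext4_map_blocks instead */
	struct ext4_map_blocks map = {
		.m_lblk = block,	/* first logical block to map */
		.m_len	= max_blocks,	/* number of blocks requested */
	};
	ret = ext4_map_blocks(handle, inode, &map, flags);
	/* on success, map.m_pblk, map.m_len and map.m_flags
	 * (EXT4_MAP_MAPPED, EXT4_MAP_NEW, ...) describe the mapping */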

Signed-off-by: Alexey Lyashkov <alexey.lyashkov@seagate.com>
Change-Id: I5c3038ece2aa48fe0c59f9ac5a58a7764b7607fe
---
 .../ext4-add-new-abstraction-ext4_map_blocks.patch | 1011 +++++++++++++++++++
 .../patches/rhel6.5/ext4-ext-walk-space.patch      |  163 ----
 .../ext4-add-new-abstraction-ext4_map_blocks.patch | 1019 ++++++++++++++++++++
 .../rhel6.6/ext4_s_max_ext_tree_depth.patch        |   80 +-
 .../series/ldiskfs-2.6-rhel6.4.series              |    1 -
 .../series/ldiskfs-2.6-rhel6.5.series              |    4 +-
 .../series/ldiskfs-2.6-rhel6.6.series              |    6 +-
 .../series/ldiskfs-2.6-rhel6.7.series              |    4 +-
 .../series/ldiskfs-2.6-rhel6.8.series              |    4 +-
 9 files changed, 2067 insertions(+), 225 deletions(-)
 create mode 100644 ldiskfs/kernel_patches/patches/rhel6.5/ext4-add-new-abstraction-ext4_map_blocks.patch
 delete mode 100644 ldiskfs/kernel_patches/patches/rhel6.5/ext4-ext-walk-space.patch
 create mode 100644 ldiskfs/kernel_patches/patches/rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch

diff --git a/ldiskfs/kernel_patches/patches/rhel6.5/ext4-add-new-abstraction-ext4_map_blocks.patch b/ldiskfs/kernel_patches/patches/rhel6.5/ext4-add-new-abstraction-ext4_map_blocks.patch
new file mode 100644
index 0000000..11ebba3
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/rhel6.5/ext4-add-new-abstraction-ext4_map_blocks.patch
@@ -0,0 +1,1011 @@
+From: Theodore Ts'o <tytso@mit.edu>
+
+From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
+
+
+Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
+ ext4_get_blocks()
+
+Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(),
+which uses a much smaller structure, struct ext4_map_blocks, which is
+20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
+bigger on an x86_64 machine.  By switching things over to use
+ext4_map_blocks(), we can save stack space, since we can avoid
+allocating a struct buffer_head on the stack.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Index: linux-stage/fs/ext4/ext4.h
+===================================================================
+--- linux-stage.orig/fs/ext4/ext4.h	2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/ext4.h	2016-07-15 09:52:29.000000000 +0300
+@@ -142,10 +142,8 @@ struct ext4_allocation_request {
+ #define EXT4_MAP_MAPPED		(1 << BH_Mapped)
+ #define EXT4_MAP_UNWRITTEN	(1 << BH_Unwritten)
+ #define EXT4_MAP_BOUNDARY	(1 << BH_Boundary)
+-#define EXT4_MAP_UNINIT		(1 << BH_Uninit)
+ #define EXT4_MAP_FLAGS		(EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
+-				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+-				 EXT4_MAP_UNINIT)
++				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+ 
+ struct ext4_map_blocks {
+ 	ext4_fsblk_t m_pblk;
+@@ -2184,9 +2182,9 @@ extern int ext4_ext_tree_init(handle_t *
+ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
+ 				       int chunk);
+-extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+-			       ext4_lblk_t iblock, unsigned int max_blocks,
+-			       struct buffer_head *bh_result, int flags);
++#define HAVE_EXT4_MAP_BLOCKS
++extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++			       struct ext4_map_blocks *map, int flags);
+ extern void ext4_ext_truncate(struct inode *);
+ extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
+ 				loff_t length);
+@@ -2196,6 +2194,8 @@ extern long ext4_fallocate(struct inode 
+ 			  loff_t len);
+ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+ 			  ssize_t len);
++extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
++			   struct ext4_map_blocks *map, int flags);
+ extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
+ 			   sector_t block, unsigned int max_blocks,
+ 			   struct buffer_head *bh, int flags);
+Index: linux-stage/fs/ext4/extents.c
+===================================================================
+--- linux-stage.orig/fs/ext4/extents.c	2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/extents.c	2016-07-15 09:53:10.000000000 +0300
+@@ -2960,7 +2960,7 @@ fix_extent_len:
+ 
+ #define EXT4_EXT_ZERO_LEN 7
+ /*
+- * This function is called by ext4_ext_get_blocks() if someone tries to write
++ * This function is called by ext4_ext_map_blocks() if someone tries to write
+  * to an uninitialized extent. It may result in splitting the uninitialized
+  * extent into multiple extents (upto three - one initialized and two
+  * uninitialized).
+@@ -2970,11 +2970,10 @@ fix_extent_len:
+  *   c> Splits in three extents: Somone is writing in middle of the extent
+  */
+ static int ext4_ext_convert_to_initialized(handle_t *handle,
+-						struct inode *inode,
+-						struct ext4_ext_path *path,
+-						ext4_lblk_t iblock,
+-						unsigned int max_blocks,
+-						int flags)
++					   struct inode *inode,
++					   struct ext4_map_blocks *map,
++					   struct ext4_ext_path *path,
++					   int flags)
+ {
+ 	struct ext4_extent *ex, newex, orig_ex;
+ 	struct ext4_extent *ex1 = NULL;
+@@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
+ 
+ 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+-		(unsigned long long)iblock, max_blocks);
++		(unsigned long long)map->m_lblk, map->m_len);
+ 
+ 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ 		inode->i_sb->s_blocksize_bits;
+-	if (eof_block < iblock + max_blocks)
+-		eof_block = iblock + max_blocks;
++	if (eof_block < map->m_lblk + map->m_len)
++		eof_block = map->m_lblk + map->m_len;
+ 
+ 	depth = ext_depth(inode);
+ 	eh = path[depth].p_hdr;
+ 	ex = path[depth].p_ext;
+ 	ee_block = le32_to_cpu(ex->ee_block);
+ 	ee_len = ext4_ext_get_actual_len(ex);
+-	allocated = ee_len - (iblock - ee_block);
+-	newblock = iblock - ee_block + ext4_ext_pblock(ex);
++	allocated = ee_len - (map->m_lblk - ee_block);
++	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+ 
+ 	ex2 = ex;
+ 	orig_ex.ee_block = ex->ee_block;
+@@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
+ 		return allocated;
+ 	}
+ 
+-	/* ex1: ee_block to iblock - 1 : uninitialized */
+-	if (iblock > ee_block) {
++	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++	if (map->m_lblk > ee_block) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+@@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
+ 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ 	 * overlap of blocks.
+ 	 */
+-	if (!ex1 && allocated > max_blocks)
+-		ex2->ee_len = cpu_to_le16(max_blocks);
++	if (!ex1 && allocated > map->m_len)
++		ex2->ee_len = cpu_to_le16(map->m_len);
+ 	/* ex3: to ee_block + ee_len : uninitialised */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unsigned int newdepth;
+ 		/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
+ 		if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
+ 			/*
+-			 * iblock == ee_block is handled by the zerouout
++			 * map->m_lblk == ee_block is handled by the zerouout
+ 			 * at the beginning.
+ 			 * Mark first half uninitialized.
+ 			 * Mark second half initialized and zero out the
+@@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 
+ 			ex3 = &newex;
+-			ex3->ee_block = cpu_to_le32(iblock);
++			ex3->ee_block = cpu_to_le32(map->m_lblk);
+ 			ext4_ext_store_pblock(ex3, newblock);
+ 			ex3->ee_len = cpu_to_le16(allocated);
+ 			err = ext4_ext_insert_extent(handle, inode, path,
+@@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
+ 				ext4_ext_store_pblock(ex,
+ 					ext4_ext_pblock(&orig_ex));
+ 				ext4_ext_dirty(handle, inode, path + depth);
+-				/* blocks available from iblock */
++				/* blocks available from map->m_lblk */
+ 				return allocated;
+ 
+ 			} else if (err)
+@@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
+ 				 */
+ 				depth = ext_depth(inode);
+ 				ext4_ext_drop_refs(path);
+-				path = ext4_ext_find_extent(inode,
+-								iblock, path);
++				path = ext4_ext_find_extent(inode, map->m_lblk,
++							    path);
+ 				if (IS_ERR(path)) {
+ 					err = PTR_ERR(path);
+ 					return err;
+@@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
+ 			return allocated;
+ 		}
+ 		ex3 = &newex;
+-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
+-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++		ext4_ext_store_pblock(ex3, newblock + map->m_len);
++		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ 		ext4_ext_mark_uninitialized(ex3);
+ 		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ 		if (err == -ENOSPC && may_zeroout) {
+@@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zeroed the full extent */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 
+ 		} else if (err)
+@@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
+ 
+ 		depth = newdepth;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
+ 		if (err)
+ 			goto out;
+ 
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 
+ 		/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
+ 		 * to insert a extent in the middle zerout directly
+ 		 * otherwise give the extent a chance to merge to left
+ 		 */
+ 		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
+-			iblock != ee_block && may_zeroout) {
++			map->m_lblk != ee_block && may_zeroout) {
+ 			err =  ext4_ext_zeroout(inode, &orig_ex);
+ 			if (err)
+ 				goto fix_extent_len;
+@@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zero out the first half */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 		}
+ 	}
+@@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
+ 	 */
+ 	if (ex1 && ex1 != ex) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+ 	}
+-	/* ex2: iblock to iblock + maxblocks-1 : initialised */
+-	ex2->ee_block = cpu_to_le32(iblock);
++	/* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
++	ex2->ee_block = cpu_to_le32(map->m_lblk);
+ 	ext4_ext_store_pblock(ex2, newblock);
+ 	ex2->ee_len = cpu_to_le16(allocated);
+ 	if (ex2 != ex)
+@@ -3277,7 +3276,7 @@ fix_extent_len:
+ }
+ 
+ /*
+- * This function is called by ext4_ext_get_blocks() from
++ * This function is called by ext4_ext_map_blocks() from
+  * ext4_get_blocks_dio_write() when DIO to write
+  * to an uninitialized extent.
+  *
+@@ -3300,9 +3299,8 @@ fix_extent_len:
+  */
+ static int ext4_split_unwritten_extents(handle_t *handle,
+ 					struct inode *inode,
++					struct ext4_map_blocks *map,
+ 					struct ext4_ext_path *path,
+-					ext4_lblk_t iblock,
+-					unsigned int max_blocks,
+ 					int flags)
+ {
+ 	struct ext4_extent *ex, newex, orig_ex;
+@@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
+ 
+ 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+-		(unsigned long long)iblock, max_blocks);
++		(unsigned long long)map->m_lblk, map->m_len);
+ 
+ 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ 		inode->i_sb->s_blocksize_bits;
+-	if (eof_block < iblock + max_blocks)
+-		eof_block = iblock + max_blocks;
++	if (eof_block < map->m_lblk + map->m_len)
++		eof_block = map->m_lblk + map->m_len;
+ 
+ 	depth = ext_depth(inode);
+ 	eh = path[depth].p_hdr;
+ 	ex = path[depth].p_ext;
+ 	ee_block = le32_to_cpu(ex->ee_block);
+ 	ee_len = ext4_ext_get_actual_len(ex);
+-	allocated = ee_len - (iblock - ee_block);
+-	newblock = iblock - ee_block + ext4_ext_pblock(ex);
++	allocated = ee_len - (map->m_lblk - ee_block);
++	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+ 
+ 	ex2 = ex;
+ 	orig_ex.ee_block = ex->ee_block;
+@@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
+  	 * block where the write begins, and the write completely
+  	 * covers the extent, then we don't need to split it.
+  	 */
+-	if ((iblock == ee_block) && (allocated <= max_blocks))
++	if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
+ 		return allocated;
+ 
+ 	err = ext4_ext_get_access(handle, inode, path + depth);
+ 	if (err)
+ 		goto out;
+-	/* ex1: ee_block to iblock - 1 : uninitialized */
+-	if (iblock > ee_block) {
++	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++	if (map->m_lblk > ee_block) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+@@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
+ 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ 	 * overlap of blocks.
+ 	 */
+-	if (!ex1 && allocated > max_blocks)
+-		ex2->ee_len = cpu_to_le16(max_blocks);
++	if (!ex1 && allocated > map->m_len)
++		ex2->ee_len = cpu_to_le16(map->m_len);
+ 	/* ex3: to ee_block + ee_len : uninitialised */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unsigned int newdepth;
+ 		ex3 = &newex;
+-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
+-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++		ext4_ext_store_pblock(ex3, newblock + map->m_len);
++		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ 		ext4_ext_mark_uninitialized(ex3);
+ 		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ 		if (err == -ENOSPC && may_zeroout) {
+@@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
+ 				err =  ext4_ext_zeroout(inode, ex3);
+ 				if (err)
+ 					goto fix_extent_len;
+-				max_blocks = allocated;
+-				ex2->ee_len = cpu_to_le16(max_blocks);
++				map->m_len = allocated;
++				ex2->ee_len = cpu_to_le16(map->m_len);
+ 				goto skip;
+ 			}
+ 			err =  ext4_ext_zeroout(inode, &orig_ex);
+@@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zeroed the full extent */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 
+ 		} else if (err)
+@@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
+ 
+ 		depth = newdepth;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
+ 		err = ext4_ext_get_access(handle, inode, path + depth);
+ 		if (err)
+ 			goto out;
+-
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 	}
+ skip:
+ 	/*
+@@ -3457,16 +3454,16 @@ skip:
+ 	 */
+ 	if (ex1 && ex1 != ex) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+ 	}
+ 	/*
+-	 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
+-	 * uninitialised still.
++	 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
++	 * using direct I/O, uninitialised still.
+ 	 */
+-	ex2->ee_block = cpu_to_le32(iblock);
++	ex2->ee_block = cpu_to_le32(map->m_lblk);
+ 	ext4_ext_store_pblock(ex2, newblock);
+ 	ex2->ee_len = cpu_to_le16(allocated);
+ 	ext4_ext_mark_uninitialized(ex2);
+@@ -3506,8 +3503,7 @@ fix_extent_len:
+ 
+ static int ext4_convert_unwritten_extents_dio(handle_t *handle,
+ 					      struct inode *inode,
+-					      ext4_lblk_t iblock,
+-					      unsigned int max_blocks,
++					      struct ext4_map_blocks *map,
+ 					      struct ext4_ext_path *path)
+ {
+ 	struct ext4_extent *ex;
+@@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
+ 
+ 	/* If extent is larger than requested then split is required */
+ 
+-	if (ee_block != iblock || ee_len > max_blocks) {
+-		err = ext4_split_unwritten_extents(handle, inode, path,
+-					iblock, max_blocks,
++	if (ee_block != map->m_lblk || ee_len > map->m_len) {
++		err = ext4_split_unwritten_extents(handle, inode, map, path,
+ 					EXT4_EXT_DATA_VALID);
+ 		if (err < 0)
+ 			goto out;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3627,10 +3622,9 @@ out:
+ 
+ static int
+ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+-			ext4_lblk_t iblock, unsigned int max_blocks,
++			struct ext4_map_blocks *map,
+ 			struct ext4_ext_path *path, int flags,
+-			unsigned int allocated, struct buffer_head *bh_result,
+-			ext4_fsblk_t newblock)
++			unsigned int allocated, ext4_fsblk_t newblock)
+ {
+ 	int ret = 0;
+ 	int err = 0;
+@@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
+ 
+ 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
+ 		  "block %llu, max_blocks %u, flags %d, allocated %u",
+-		  inode->i_ino, (unsigned long long)iblock, max_blocks,
++		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
+ 		  flags, allocated);
+ 	ext4_ext_show_leaf(inode, path);
+ 
+@@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
+ 	/* DIO get_block() before submit the IO, split the extent */
+ 	if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ 	    EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+-		ret = ext4_split_unwritten_extents(handle,
+-						inode, path, iblock,
+-						max_blocks, flags);
++		ret = ext4_split_unwritten_extents(handle, inode, map,
++						   path, flags);
+ 		/*
+ 		 * Flag the inode(non aio case) or end_io struct (aio case)
+ 		 * that this IO needs to convertion to written when IO is
+@@ -3670,12 +3663,11 @@ ext4_ext_handle_uninitialized_extents(ha
+ 	if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ 	    EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
+ 		ret = ext4_convert_unwritten_extents_dio(handle, inode,
+-							 iblock, max_blocks,
+-							 path);
++							 map, path);
+ 		if (ret >= 0) {
+ 			ext4_update_inode_fsync_trans(handle, inode, 1);
+-			err = check_eofblocks_fl(handle, inode, iblock, path,
+-						 max_blocks);
++			err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
++						 map->m_len);
+ 		} else
+ 			err = ret;
+ 		goto out2;
+@@ -3697,18 +3689,15 @@ ext4_ext_handle_uninitialized_extents(ha
+ 		 * the buffer head will be unmapped so that
+ 		 * a read from the block returns 0s.
+ 		 */
+-		set_buffer_unwritten(bh_result);
++		map->m_flags |= EXT4_MAP_UNWRITTEN;
+ 		goto out1;
+ 	}
+ 
+ 	/* buffered write, writepage time, convert*/
+-	ret = ext4_ext_convert_to_initialized(handle, inode,
+-						path, iblock,
+-						max_blocks,
+-						flags);
++	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
+ 	if (ret >= 0) {
+ 		ext4_update_inode_fsync_trans(handle, inode, 1);
+-		err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
++		err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
+ 		if (err < 0)
+ 			goto out2;
+ 	}
+@@ -3718,7 +3707,7 @@ out:
+ 		goto out2;
+ 	} else
+ 		allocated = ret;
+-	set_buffer_new(bh_result);
++	map->m_flags |= EXT4_MAP_NEW;
+ 	/*
+ 	 * if we allocated more blocks than requested
+ 	 * we need to make sure we unmap the extra block
+@@ -3726,11 +3715,11 @@ out:
+ 	 * unmapped later when we find the buffer_head marked
+ 	 * new.
+ 	 */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
+-					newblock + max_blocks,
+-					allocated - max_blocks);
+-		allocated = max_blocks;
++					newblock + map->m_len,
++					allocated - map->m_len);
++		allocated = map->m_len;
+ 	}
+ 
+ 	/*
+@@ -3744,13 +3733,13 @@ out:
+ 		ext4_da_update_reserve_space(inode, allocated, 0);
+ 
+ map_out:
+-	set_buffer_mapped(bh_result);
++	map->m_flags |= EXT4_MAP_MAPPED;
+ out1:
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
++	if (allocated > map->m_len)
++		allocated = map->m_len;
+ 	ext4_ext_show_leaf(inode, path);
+-	bh_result->b_bdev = inode->i_sb->s_bdev;
+-	bh_result->b_blocknr = newblock;
++	map->m_pblk = newblock;
++	map->m_len = allocated;
+ out2:
+ 	if (path) {
+ 		ext4_ext_drop_refs(path);
+@@ -3777,10 +3766,8 @@ out2:
+  *
+  * return < 0, error case.
+  */
+-int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+-			ext4_lblk_t iblock,
+-			unsigned int max_blocks, struct buffer_head *bh_result,
+-			int flags)
++int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++			struct ext4_map_blocks *map, int flags)
+ {
+ 	struct ext4_ext_path *path = NULL;
+ 	struct ext4_extent_header *eh;
+@@ -3791,12 +3778,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	struct ext4_allocation_request ar;
+ 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+ 
+-	__clear_bit(BH_New, &bh_result->b_state);
+ 	ext_debug("blocks %u/%u requested for inode %lu\n",
+-			iblock, max_blocks, inode->i_ino);
++		  map->m_lblk, map->m_len, inode->i_ino);
+ 
+ 	/* check in cache */
+-	if (ext4_ext_in_cache(inode, iblock, &newex)) {
++	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+ 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
+ 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+ 				/*
+@@ -3808,18 +3794,18 @@ int ext4_ext_get_blocks(handle_t *handle
+ 			/* we should allocate requested block */
+ 		} else {
+ 			/* block is already allocated */
+-			newblock = iblock
++			newblock = map->m_lblk
+ 				   - le32_to_cpu(newex.ee_block)
+ 				   + ext4_ext_pblock(&newex);
+ 			/* number of remaining blocks in the extent */
+ 			allocated = ext4_ext_get_actual_len(&newex) -
+-					(iblock - le32_to_cpu(newex.ee_block));
++					(map->m_lblk - le32_to_cpu(newex.ee_block));
+ 			goto out;
+ 		}
+ 	}
+ 
+ 	/* find extent for this block */
+-	path = ext4_ext_find_extent(inode, iblock, NULL);
++	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+ 	if (IS_ERR(path)) {
+ 		err = PTR_ERR(path);
+ 		path = NULL;
+@@ -3836,7 +3822,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ 		EXT4_ERROR_INODE(inode, "bad extent address "
+ 				 "iblock: %d, depth: %d pblock %lld",
+-				 iblock, depth, path[depth].p_block);
++				 map->m_lblk, depth, path[depth].p_block);
+ 		err = -EIO;
+ 		goto out2;
+ 	}
+@@ -3854,11 +3840,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 		 */
+ 		ee_len = ext4_ext_get_actual_len(ex);
+ 		/* if found extent covers block, simply return it */
+-		if (in_range(iblock, ee_block, ee_len)) {
+-			newblock = iblock - ee_block + ee_start;
++		if (in_range(map->m_lblk, ee_block, ee_len)) {
++			newblock = map->m_lblk - ee_block + ee_start;
+ 			/* number of remaining blocks in the extent */
+-			allocated = ee_len - (iblock - ee_block);
+-			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
++			allocated = ee_len - (map->m_lblk - ee_block);
++			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
+ 					ee_block, ee_len, newblock);
+ 
+ 			/*
+@@ -3870,9 +3856,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ 					ee_len, ee_start);
+ 				goto out;
+ 			}
+-			ret = ext4_ext_handle_uninitialized_extents(
+-				handle, inode, iblock, max_blocks, path,
+-				flags, allocated, bh_result, newblock);
++			ret = ext4_ext_handle_uninitialized_extents(handle,
++					inode, map, path, flags, allocated,
++					newblock);
+ 			return ret;
+ 		}
+ 	}
+@@ -3886,7 +3872,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 		 * put just found gap into cache to speed up
+ 		 * subsequent requests
+ 		 */
+-		ext4_ext_put_gap_in_cache(inode, path, iblock);
++		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+ 		goto out2;
+ 	}
+ 	/*
+@@ -3894,11 +3880,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 */
+ 
+ 	/* find neighbour allocated blocks */
+-	ar.lleft = iblock;
++	ar.lleft = map->m_lblk;
+ 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
+ 	if (err)
+ 		goto out2;
+-	ar.lright = iblock;
++	ar.lright = map->m_lblk;
+ 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
+ 	if (err)
+ 		goto out2;
+@@ -3909,26 +3895,26 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+ 	 * EXT_UNINIT_MAX_LEN.
+ 	 */
+-	if (max_blocks > EXT_INIT_MAX_LEN &&
++	if (map->m_len > EXT_INIT_MAX_LEN &&
+ 	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+-		max_blocks = EXT_INIT_MAX_LEN;
+-	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
++		map->m_len = EXT_INIT_MAX_LEN;
++	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
+ 		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+-		max_blocks = EXT_UNINIT_MAX_LEN;
++		map->m_len = EXT_UNINIT_MAX_LEN;
+ 
+-	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+-	newex.ee_block = cpu_to_le32(iblock);
+-	newex.ee_len = cpu_to_le16(max_blocks);
++	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
++	newex.ee_block = cpu_to_le32(map->m_lblk);
++	newex.ee_len = cpu_to_le16(map->m_len);
+ 	err = ext4_ext_check_overlap(inode, &newex, path);
+ 	if (err)
+ 		allocated = ext4_ext_get_actual_len(&newex);
+ 	else
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 
+ 	/* allocate new block */
+ 	ar.inode = inode;
+-	ar.goal = ext4_ext_find_goal(inode, path, iblock);
+-	ar.logical = iblock;
++	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
++	ar.logical = map->m_lblk;
+ 	ar.len = allocated;
+ 	if (S_ISREG(inode->i_mode))
+ 		ar.flags = EXT4_MB_HINT_DATA;
+@@ -3967,7 +3953,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 		}
+ 	}
+ 
+-	err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
++	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
+ 	if (err)
+ 		goto out2;
+ 
+@@ -3987,9 +3973,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	/* previous routine could use block we allocated */
+ 	newblock = ext4_ext_pblock(&newex);
+ 	allocated = ext4_ext_get_actual_len(&newex);
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
+-	set_buffer_new(bh_result);
++	if (allocated > map->m_len)
++		allocated = map->m_len;
++	map->m_flags |= EXT4_MAP_NEW;
+ 
+ 	/*
+ 	 * Update reserved blocks/metadata blocks after successful
+@@ -4003,17 +3989,17 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 * when it is _not_ an uninitialized extent.
+ 	 */
+ 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
+-		ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
++		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+ 		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	} else
+ 		ext4_update_inode_fsync_trans(handle, inode, 0);
+ out:
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
++	if (allocated > map->m_len)
++		allocated = map->m_len;
+ 	ext4_ext_show_leaf(inode, path);
+-	set_buffer_mapped(bh_result);
+-	bh_result->b_bdev = inode->i_sb->s_bdev;
+-	bh_result->b_blocknr = newblock;
++	map->m_flags |= EXT4_MAP_MAPPED;
++	map->m_pblk = newblock;
++	map->m_len = allocated;
+ out2:
+ 	if (path) {
+ 		ext4_ext_drop_refs(path);
+@@ -4196,7 +4182,7 @@ retry:
+ 		if (ret <= 0) {
+ #ifdef EXT4FS_DEBUG
+ 			WARN_ON(ret <= 0);
+-			printk(KERN_ERR "%s: ext4_ext_get_blocks "
++			printk(KERN_ERR "%s: ext4_ext_map_blocks "
+ 				    "returned error inode#%lu, block=%u, "
+ 				    "max_blocks=%u", __func__,
+ 				    inode->i_ino, block, max_blocks);
+@@ -4709,6 +4695,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
+ EXPORT_SYMBOL(ext4_mb_new_blocks);
+ EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
+ EXPORT_SYMBOL(ext4_mark_inode_dirty);
+-EXPORT_SYMBOL(ext4_ext_walk_space);
+ EXPORT_SYMBOL(ext4_ext_find_extent);
+ EXPORT_SYMBOL(ext4_ext_drop_refs);
+Index: linux-stage/fs/ext4/inode.c
+===================================================================
+--- linux-stage.orig/fs/ext4/inode.c	2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/inode.c	2016-07-15 09:52:29.000000000 +0300
+@@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
+ 	int ret;
+ 
+ 	/*
+-	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
++	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
+ 	 * moment, get_block can be called only for blocks inside i_size since
+ 	 * page cache has been already dropped and writes are blocked by
+ 	 * i_mutex. So we can safely drop the i_data_sem here.
+@@ -970,9 +970,9 @@ err_out:
+ }
+ 
+ /*
+- * The ext4_ind_get_blocks() function handles non-extents inodes
++ * The ext4_ind_map_blocks() function handles non-extents inodes
+  * (i.e., using the traditional indirect/double-indirect i_blocks
+- * scheme) for ext4_get_blocks().
++ * scheme) for ext4_map_blocks().
+  *
+  * Allocation strategy is simple: if we have to allocate something, we will
+  * have to go the whole way to leaf. So let's do it before attaching anything
+@@ -991,15 +991,14 @@ err_out:
+  * return = 0, if plain lookup failed.
+  * return < 0, error case.
+  *
+- * The ext4_ind_get_blocks() function should be called with
++ * The ext4_ind_map_blocks() function should be called with
+  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+  * blocks.
+  */
+-static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
+-			       ext4_lblk_t iblock, unsigned int maxblocks,
+-			       struct buffer_head *bh_result,
++static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
++			       struct ext4_map_blocks *map,
+ 			       int flags)
+ {
+ 	int err = -EIO;
+@@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t 
+ 
+ 	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+ 	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
+-	depth = ext4_block_to_path(inode, iblock, offsets,
++	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
+ 				   &blocks_to_boundary);
+ 
+ 	if (depth == 0)
+@@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t 
+ 	/* Simplest case - block found, no allocation needed */
+ 	if (!partial) {
+ 		first_block = le32_to_cpu(chain[depth - 1].key);
+-		clear_buffer_new(bh_result);
+ 		count++;
+ 		/*map more blocks*/
+-		while (count < maxblocks && count <= blocks_to_boundary) {
++		while (count < map->m_len && count <= blocks_to_boundary) {
+ 			ext4_fsblk_t blk;
+ 
+ 			blk = le32_to_cpu(*(chain[depth-1].p + count));
+@@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t 
+ 	/*
+ 	 * Okay, we need to do block allocation.
+ 	*/
+-	goal = ext4_find_goal(inode, iblock, partial);
++	goal = ext4_find_goal(inode, map->m_lblk, partial);
+ 
+ 	/* the number of blocks need to allocate for [d,t]indirect blocks */
+ 	indirect_blks = (chain + depth) - partial - 1;
+@@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t 
+ 	 * direct blocks to allocate for this branch.
+ 	 */
+ 	count = ext4_blks_to_allocate(partial, indirect_blks,
+-					maxblocks, blocks_to_boundary);
++				      map->m_len, blocks_to_boundary);
+ 	/*
+ 	 * Block out ext4_truncate while we alter the tree
+ 	 */
+-	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
++	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
+ 				&count, goal,
+ 				offsets + (partial - chain), partial);
+ 
+@@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t 
+ 	 * may need to return -EAGAIN upwards in the worst case.  --sct
+ 	 */
+ 	if (!err)
+-		err = ext4_splice_branch(handle, inode, iblock,
++		err = ext4_splice_branch(handle, inode, map->m_lblk,
+ 					 partial, indirect_blks, count);
+ 	if (err)
+ 		goto cleanup;
+ 
+-	set_buffer_new(bh_result);
++	map->m_flags |= EXT4_MAP_NEW;
+ 
+ 	ext4_update_inode_fsync_trans(handle, inode, 1);
+ got_it:
+-	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
++	map->m_flags |= EXT4_MAP_MAPPED;
++	map->m_pblk = le32_to_cpu(chain[depth-1].key);
++	map->m_len = count;
+ 	if (count > blocks_to_boundary)
+-		set_buffer_boundary(bh_result);
++		map->m_flags |= EXT4_MAP_BOUNDARY;
+ 	err = count;
+ 	/* Clean up and exit */
+ 	partial = chain + depth - 1;	/* the whole chain */
+@@ -1096,7 +1096,6 @@ cleanup:
+ 		brelse(partial->bh);
+ 		partial--;
+ 	}
+-	BUFFER_TRACE(bh_result, "returned");
+ out:
+ 	return err;
+ }
+@@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
+ }
+ 
+ /*
+- * The ext4_get_blocks() function tries to look up the requested blocks,
++ * The ext4_map_blocks() function tries to look up the requested blocks,
+  * and returns if the blocks are already mapped.
+  *
+  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
+  * and store the allocated blocks in the result buffer head and mark it
+  * mapped.
+  *
+- * If file type is extents based, it will call ext4_ext_get_blocks(),
+- * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
++ * If file type is extents based, it will call ext4_ext_map_blocks(),
++ * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
+  * based files
+  *
+  * On success, it returns the number of blocks being mapped or allocate.
+@@ -1312,35 +1311,31 @@ static pgoff_t ext4_num_dirty_pages(stru
+  *
+  * It returns the error in case of allocation failure.
+  */
+-int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
+-		    unsigned int max_blocks, struct buffer_head *bh,
+-		    int flags)
++int ext4_map_blocks(handle_t *handle, struct inode *inode,
++		    struct ext4_map_blocks *map, int flags)
+ {
+ 	int retval;
+ 
+-	clear_buffer_mapped(bh);
+-	clear_buffer_unwritten(bh);
++	map->m_flags = 0;
++	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
++		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
++		  (unsigned long) map->m_lblk);
+ 
+-	ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
+-		  "logical block %lu\n", inode->i_ino, flags, max_blocks,
+-		  (unsigned long)block);
+ 	/*
+ 	 * Try to see if we can get the block without requesting a new
+ 	 * file system block.
+ 	 */
+ 	down_read((&EXT4_I(inode)->i_data_sem));
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
+-				bh, 0);
++		retval = ext4_ext_map_blocks(handle, inode, map, 0);
+ 	} else {
+-		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
+-					     bh, 0);
++		retval = ext4_ind_map_blocks(handle, inode, map, 0);
+ 	}
+ 	up_read((&EXT4_I(inode)->i_data_sem));
+ 
+-	if (retval > 0 && buffer_mapped(bh)) {
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ 		int ret = check_block_validity(inode, "file system corruption",
+-					       block, bh->b_blocknr, retval);
++					map->m_lblk, map->m_pblk, retval);
+ 		if (ret != 0)
+ 			return ret;
+ 	}
+@@ -1356,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * ext4_ext_get_block() returns th create = 0
+ 	 * with buffer head unmapped.
+ 	 */
+-	if (retval > 0 && buffer_mapped(bh))
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
+ 		return retval;
+ 
+ 	/*
+@@ -1369,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
+ 	 * set on the buffer_head.
+ 	 */
+-	clear_buffer_unwritten(bh);
++	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
+ 
+ 	/*
+ 	 * New blocks allocate and/or writing to uninitialized extent
+@@ -1392,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * could have changed the inode type in between
+ 	 */
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
+-					      bh, flags);
++		retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ 	} else {
+-		retval = ext4_ind_get_blocks(handle, inode, block,
+-					     max_blocks, bh, flags);
++		retval = ext4_ind_map_blocks(handle, inode, map, flags);
+ 
+-		if (retval > 0 && buffer_new(bh)) {
++		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
+ 			/*
+ 			 * We allocated new blocks which will result in
+ 			 * i_data's format changing.  Force the migrate
+@@ -1421,15 +1414,38 @@ int ext4_get_blocks(handle_t *handle, st
+ 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+ 
+ 	up_write((&EXT4_I(inode)->i_data_sem));
+-	if (retval > 0 && buffer_mapped(bh)) {
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ 		int ret = check_block_validity(inode, "file system "
+ 					       "corruption after allocation",
+-					       block, bh->b_blocknr, retval);
++					       map->m_lblk, map->m_pblk,
++					       retval);
+ 		if (ret != 0)
+ 			return ret;
+ 	}
+ 	return retval;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++
++int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
++		    unsigned int max_blocks, struct buffer_head *bh,
++		    int flags)
++{
++	struct ext4_map_blocks map;
++	int ret;
++
++	map.m_lblk = block;
++	map.m_len = max_blocks;
++
++	ret = ext4_map_blocks(handle, inode, &map, flags);
++	if (ret < 0)
++		return ret;
++
++	bh->b_blocknr = map.m_pblk;
++	bh->b_size = inode->i_sb->s_blocksize * map.m_len;
++	bh->b_bdev = inode->i_sb->s_bdev;
++	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++	return ret;
++}
+ 
+ /* Maximum number of blocks we map for direct IO at once. */
+ #define DIO_MAX_BLOCKS 4096
diff --git a/ldiskfs/kernel_patches/patches/rhel6.5/ext4-ext-walk-space.patch b/ldiskfs/kernel_patches/patches/rhel6.5/ext4-ext-walk-space.patch
deleted file mode 100644
index 714de7b..0000000
--- a/ldiskfs/kernel_patches/patches/rhel6.5/ext4-ext-walk-space.patch
+++ /dev/null
@@ -1,163 +0,0 @@
-Restore ext4_ext_walk_space().
-Copy from rhel6.4 [2.6.32-358.23.2.el6] kernel.
-
-Index: linux-2.6.32-431.3.1.el6.x86_64/fs/ext4/extents.c
-===================================================================
---- linux-2.6.32-431.3.1.el6.x86_64.orig/fs/ext4/extents.c
-+++ linux-2.6.32-431.3.1.el6.x86_64/fs/ext4/extents.c
-@@ -4487,6 +4487,121 @@ static int ext4_xattr_fiemap(struct inod
- 	return (error < 0 ? error : 0);
- }
- 
-+int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
-+			ext4_lblk_t num, ext_prepare_callback func,
-+			void *cbdata)
-+{
-+	struct ext4_ext_path *path = NULL;
-+	struct ext4_ext_cache cbex;
-+	struct ext4_extent *ex;
-+	ext4_lblk_t next, start = 0, end = 0;
-+	ext4_lblk_t last = block + num;
-+	int depth, exists, err = 0;
-+
-+	BUG_ON(func == NULL);
-+	BUG_ON(inode == NULL);
-+
-+	while (block < last && block != EXT_MAX_BLOCKS) {
-+		num = last - block;
-+		/* find extent for this block */
-+		down_read(&EXT4_I(inode)->i_data_sem);
-+		path = ext4_ext_find_extent(inode, block, path);
-+		up_read(&EXT4_I(inode)->i_data_sem);
-+		if (IS_ERR(path)) {
-+			err = PTR_ERR(path);
-+			path = NULL;
-+			break;
-+		}
-+
-+		depth = ext_depth(inode);
-+		if (unlikely(path[depth].p_hdr == NULL)) {
-+			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
-+			err = -EIO;
-+			break;
-+		}
-+		ex = path[depth].p_ext;
-+		next = ext4_ext_next_allocated_block(path);
-+
-+		exists = 0;
-+		if (!ex) {
-+			/* there is no extent yet, so try to allocate
-+			 * all requested space */
-+			start = block;
-+			end = block + num;
-+		} else if (le32_to_cpu(ex->ee_block) > block) {
-+			/* need to allocate space before found extent */
-+			start = block;
-+			end = le32_to_cpu(ex->ee_block);
-+			if (block + num < end)
-+				end = block + num;
-+		} else if (block >= le32_to_cpu(ex->ee_block)
-+					+ ext4_ext_get_actual_len(ex)) {
-+			/* need to allocate space after found extent */
-+			start = block;
-+			end = block + num;
-+			if (end >= next)
-+				end = next;
-+		} else if (block >= le32_to_cpu(ex->ee_block)) {
-+			/*
-+			 * some part of requested space is covered
-+			 * by found extent
-+			 */
-+			start = block;
-+			end = le32_to_cpu(ex->ee_block)
-+				+ ext4_ext_get_actual_len(ex);
-+			if (block + num < end)
-+				end = block + num;
-+			exists = 1;
-+		} else {
-+			BUG();
-+		}
-+		BUG_ON(end <= start);
-+
-+		if (!exists) {
-+			cbex.ec_block = start;
-+			cbex.ec_len = end - start;
-+			cbex.ec_start = 0;
-+		} else {
-+			cbex.ec_block = le32_to_cpu(ex->ee_block);
-+			cbex.ec_len = ext4_ext_get_actual_len(ex);
-+			cbex.ec_start = ext4_ext_pblock(ex);
-+		}
-+
-+		if (unlikely(cbex.ec_len == 0)) {
-+			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
-+			err = -EIO;
-+			break;
-+		}
-+		err = func(inode, path, &cbex, ex, cbdata);
-+		ext4_ext_drop_refs(path);
-+
-+		if (err < 0)
-+			break;
-+
-+		if (err == EXT_REPEAT)
-+			continue;
-+		else if (err == EXT_BREAK) {
-+			err = 0;
-+			break;
-+		}
-+
-+		if (ext_depth(inode) != depth) {
-+			/* depth was changed. we have to realloc path */
-+			kfree(path);
-+			path = NULL;
-+		}
-+
-+		block = cbex.ec_block + cbex.ec_len;
-+	}
-+
-+	if (path) {
-+		ext4_ext_drop_refs(path);
-+		kfree(path);
-+	}
-+
-+	return err;
-+}
-+
- /*
-  * ext4_ext_punch_hole
-  *
-Index: linux-2.6.32-431.3.1.el6.x86_64/fs/ext4/ext4_extents.h
-===================================================================
---- linux-2.6.32-431.3.1.el6.x86_64.orig/fs/ext4/ext4_extents.h
-+++ linux-2.6.32-431.3.1.el6.x86_64/fs/ext4/ext4_extents.h
-@@ -120,6 +120,19 @@ struct ext4_ext_path {
- 	struct ext4_extent_header	*p_hdr;
- 	struct buffer_head		*p_bh;
- };
-+/*
-+ * to be called by ext4_ext_walk_space()
-+ * negative retcode - error
-+ * positive retcode - signal for ext4_ext_walk_space(), see below
-+ * callback must return valid extent (passed or newly created)
-+ */
-+typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
-+					struct ext4_ext_cache *,
-+					struct ext4_extent *, void *);
-+
-+#define EXT_CONTINUE   0
-+#define EXT_BREAK      1
-+#define EXT_REPEAT     2
- 
- /*
-  * structure for external API
-@@ -272,6 +285,9 @@ static inline void ext4_idx_store_pblock
- 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) &
- 				     0xffff);
- }
-+extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t,
-+			ext4_lblk_t, ext_prepare_callback,
-+			void *);
- 
- extern int ext4_ext_calc_metadata_amount(struct inode *inode,
- 					 sector_t lblocks);
diff --git a/ldiskfs/kernel_patches/patches/rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch b/ldiskfs/kernel_patches/patches/rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch
new file mode 100644
index 0000000..0fe87ff
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch
@@ -0,0 +1,1019 @@
+From: Theodore Ts'o <tytso@mit.edu>
+
+From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
+
+
+Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
+ ext4_get_blocks()
+
+Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(),
+which uses a much smaller structure, struct ext4_map_blocks, which is
+20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
+bigger on an x86_64 machine.  By switching things over to use
+ext4_map_blocks(), we can save stack space, since we can avoid
+allocating a struct buffer_head on the stack.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Index: linux-stage/fs/ext4/ext4.h
+===================================================================
+--- linux-stage.orig/fs/ext4/ext4.h	2016-07-15 12:13:05.000000000 +0300
++++ linux-stage/fs/ext4/ext4.h	2016-07-15 12:13:05.000000000 +0300
+@@ -142,10 +142,8 @@ struct ext4_allocation_request {
+ #define EXT4_MAP_MAPPED		(1 << BH_Mapped)
+ #define EXT4_MAP_UNWRITTEN	(1 << BH_Unwritten)
+ #define EXT4_MAP_BOUNDARY	(1 << BH_Boundary)
+-#define EXT4_MAP_UNINIT		(1 << BH_Uninit)
+ #define EXT4_MAP_FLAGS		(EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
+-				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+-				 EXT4_MAP_UNINIT)
++				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+ 
+ struct ext4_map_blocks {
+ 	ext4_fsblk_t m_pblk;
+@@ -2194,9 +2192,9 @@ extern int ext4_ext_tree_init(handle_t *
+ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
+ 				       int chunk);
+-extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+-			       ext4_lblk_t iblock, unsigned int max_blocks,
+-			       struct buffer_head *bh_result, int flags);
++#define HAVE_EXT4_MAP_BLOCKS
++extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++			       struct ext4_map_blocks *map, int flags);
+ extern void ext4_ext_truncate(struct inode *);
+ extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
+ 				loff_t length);
+@@ -2206,6 +2204,8 @@ extern long ext4_fallocate(struct inode 
+ 			  loff_t len);
+ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+ 			  ssize_t len);
++extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
++			   struct ext4_map_blocks *map, int flags);
+ extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
+ 			   sector_t block, unsigned int max_blocks,
+ 			   struct buffer_head *bh, int flags);
+Index: linux-stage/fs/ext4/extents.c
+===================================================================
+--- linux-stage.orig/fs/ext4/extents.c	2016-07-15 12:13:04.000000000 +0300
++++ linux-stage/fs/ext4/extents.c	2016-07-15 12:13:05.000000000 +0300
+@@ -2960,7 +2960,7 @@ fix_extent_len:
+ 
+ #define EXT4_EXT_ZERO_LEN 7
+ /*
+- * This function is called by ext4_ext_get_blocks() if someone tries to write
++ * This function is called by ext4_ext_map_blocks() if someone tries to write
+  * to an uninitialized extent. It may result in splitting the uninitialized
+  * extent into multiple extents (upto three - one initialized and two
+  * uninitialized).
+@@ -2970,11 +2970,10 @@ fix_extent_len:
+  *   c> Splits in three extents: Somone is writing in middle of the extent
+  */
+ static int ext4_ext_convert_to_initialized(handle_t *handle,
+-						struct inode *inode,
+-						struct ext4_ext_path *path,
+-						ext4_lblk_t iblock,
+-						unsigned int max_blocks,
+-						int flags)
++					   struct inode *inode,
++					   struct ext4_map_blocks *map,
++					   struct ext4_ext_path *path,
++					   int flags)
+ {
+ 	struct ext4_extent *ex, newex, orig_ex;
+ 	struct ext4_extent *ex1 = NULL;
+@@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
+ 
+ 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+-		(unsigned long long)iblock, max_blocks);
++		(unsigned long long)map->m_lblk, map->m_len);
+ 
+ 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ 		inode->i_sb->s_blocksize_bits;
+-	if (eof_block < iblock + max_blocks)
+-		eof_block = iblock + max_blocks;
++	if (eof_block < map->m_lblk + map->m_len)
++		eof_block = map->m_lblk + map->m_len;
+ 
+ 	depth = ext_depth(inode);
+ 	eh = path[depth].p_hdr;
+ 	ex = path[depth].p_ext;
+ 	ee_block = le32_to_cpu(ex->ee_block);
+ 	ee_len = ext4_ext_get_actual_len(ex);
+-	allocated = ee_len - (iblock - ee_block);
+-	newblock = iblock - ee_block + ext4_ext_pblock(ex);
++	allocated = ee_len - (map->m_lblk - ee_block);
++	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+ 
+ 	ex2 = ex;
+ 	orig_ex.ee_block = ex->ee_block;
+@@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
+ 		return allocated;
+ 	}
+ 
+-	/* ex1: ee_block to iblock - 1 : uninitialized */
+-	if (iblock > ee_block) {
++	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++	if (map->m_lblk > ee_block) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+@@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
+ 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ 	 * overlap of blocks.
+ 	 */
+-	if (!ex1 && allocated > max_blocks)
+-		ex2->ee_len = cpu_to_le16(max_blocks);
++	if (!ex1 && allocated > map->m_len)
++		ex2->ee_len = cpu_to_le16(map->m_len);
+ 	/* ex3: to ee_block + ee_len : uninitialised */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unsigned int newdepth;
+ 		/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
+ 		if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
+ 			/*
+-			 * iblock == ee_block is handled by the zerouout
++			 * map->m_lblk == ee_block is handled by the zerouout
+ 			 * at the beginning.
+ 			 * Mark first half uninitialized.
+ 			 * Mark second half initialized and zero out the
+@@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 
+ 			ex3 = &newex;
+-			ex3->ee_block = cpu_to_le32(iblock);
++			ex3->ee_block = cpu_to_le32(map->m_lblk);
+ 			ext4_ext_store_pblock(ex3, newblock);
+ 			ex3->ee_len = cpu_to_le16(allocated);
+ 			err = ext4_ext_insert_extent(handle, inode, path,
+@@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
+ 				ext4_ext_store_pblock(ex,
+ 					ext4_ext_pblock(&orig_ex));
+ 				ext4_ext_dirty(handle, inode, path + depth);
+-				/* blocks available from iblock */
++				/* blocks available from map->m_lblk */
+ 				return allocated;
+ 
+ 			} else if (err)
+@@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
+ 				 */
+ 				depth = ext_depth(inode);
+ 				ext4_ext_drop_refs(path);
+-				path = ext4_ext_find_extent(inode,
+-								iblock, path);
++				path = ext4_ext_find_extent(inode, map->m_lblk,
++							    path);
+ 				if (IS_ERR(path)) {
+ 					err = PTR_ERR(path);
+ 					return err;
+@@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
+ 			return allocated;
+ 		}
+ 		ex3 = &newex;
+-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
+-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++		ext4_ext_store_pblock(ex3, newblock + map->m_len);
++		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ 		ext4_ext_mark_uninitialized(ex3);
+ 		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ 		if (err == -ENOSPC && may_zeroout) {
+@@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zeroed the full extent */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 
+ 		} else if (err)
+@@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
+ 
+ 		depth = newdepth;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
+ 		if (err)
+ 			goto out;
+ 
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 
+ 		/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
+ 		 * to insert a extent in the middle zerout directly
+ 		 * otherwise give the extent a chance to merge to left
+ 		 */
+ 		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
+-			iblock != ee_block && may_zeroout) {
++			map->m_lblk != ee_block && may_zeroout) {
+ 			err =  ext4_ext_zeroout(inode, &orig_ex);
+ 			if (err)
+ 				goto fix_extent_len;
+@@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zero out the first half */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 		}
+ 	}
+@@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
+ 	 */
+ 	if (ex1 && ex1 != ex) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+ 	}
+-	/* ex2: iblock to iblock + maxblocks-1 : initialised */
+-	ex2->ee_block = cpu_to_le32(iblock);
++	/* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
++	ex2->ee_block = cpu_to_le32(map->m_lblk);
+ 	ext4_ext_store_pblock(ex2, newblock);
+ 	ex2->ee_len = cpu_to_le16(allocated);
+ 	if (ex2 != ex)
+@@ -3277,7 +3276,7 @@ fix_extent_len:
+ }
+ 
+ /*
+- * This function is called by ext4_ext_get_blocks() from
++ * This function is called by ext4_ext_map_blocks() from
+  * ext4_get_blocks_dio_write() when DIO to write
+  * to an uninitialized extent.
+  *
+@@ -3300,9 +3299,8 @@ fix_extent_len:
+  */
+ static int ext4_split_unwritten_extents(handle_t *handle,
+ 					struct inode *inode,
++					struct ext4_map_blocks *map,
+ 					struct ext4_ext_path *path,
+-					ext4_lblk_t iblock,
+-					unsigned int max_blocks,
+ 					int flags)
+ {
+ 	struct ext4_extent *ex, newex, orig_ex;
+@@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
+ 
+ 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+-		(unsigned long long)iblock, max_blocks);
++		(unsigned long long)map->m_lblk, map->m_len);
+ 
+ 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ 		inode->i_sb->s_blocksize_bits;
+-	if (eof_block < iblock + max_blocks)
+-		eof_block = iblock + max_blocks;
++	if (eof_block < map->m_lblk + map->m_len)
++		eof_block = map->m_lblk + map->m_len;
+ 
+ 	depth = ext_depth(inode);
+ 	eh = path[depth].p_hdr;
+ 	ex = path[depth].p_ext;
+ 	ee_block = le32_to_cpu(ex->ee_block);
+ 	ee_len = ext4_ext_get_actual_len(ex);
+-	allocated = ee_len - (iblock - ee_block);
+-	newblock = iblock - ee_block + ext4_ext_pblock(ex);
++	allocated = ee_len - (map->m_lblk - ee_block);
++	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+ 
+ 	ex2 = ex;
+ 	orig_ex.ee_block = ex->ee_block;
+@@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
+  	 * block where the write begins, and the write completely
+  	 * covers the extent, then we don't need to split it.
+  	 */
+-	if ((iblock == ee_block) && (allocated <= max_blocks))
++	if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
+ 		return allocated;
+ 
+ 	err = ext4_ext_get_access(handle, inode, path + depth);
+ 	if (err)
+ 		goto out;
+-	/* ex1: ee_block to iblock - 1 : uninitialized */
+-	if (iblock > ee_block) {
++	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++	if (map->m_lblk > ee_block) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+@@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
+ 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ 	 * overlap of blocks.
+ 	 */
+-	if (!ex1 && allocated > max_blocks)
+-		ex2->ee_len = cpu_to_le16(max_blocks);
++	if (!ex1 && allocated > map->m_len)
++		ex2->ee_len = cpu_to_le16(map->m_len);
+ 	/* ex3: to ee_block + ee_len : uninitialised */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unsigned int newdepth;
+ 		ex3 = &newex;
+-		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+-		ext4_ext_store_pblock(ex3, newblock + max_blocks);
+-		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++		ext4_ext_store_pblock(ex3, newblock + map->m_len);
++		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ 		ext4_ext_mark_uninitialized(ex3);
+ 		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ 		if (err == -ENOSPC && may_zeroout) {
+@@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
+ 				err =  ext4_ext_zeroout(inode, ex3);
+ 				if (err)
+ 					goto fix_extent_len;
+-				max_blocks = allocated;
+-				ex2->ee_len = cpu_to_le16(max_blocks);
++				map->m_len = allocated;
++				ex2->ee_len = cpu_to_le16(map->m_len);
+ 				goto skip;
+ 			}
+ 			err =  ext4_ext_zeroout(inode, &orig_ex);
+@@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
+ 			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ 			ext4_ext_dirty(handle, inode, path + depth);
+ 			/* zeroed the full extent */
+-			/* blocks available from iblock */
++			/* blocks available from map->m_lblk */
+ 			return allocated;
+ 
+ 		} else if (err)
+@@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
+ 
+ 		depth = newdepth;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
+ 		err = ext4_ext_get_access(handle, inode, path + depth);
+ 		if (err)
+ 			goto out;
+-
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 	}
+ skip:
+ 	/*
+@@ -3457,16 +3454,16 @@ skip:
+ 	 */
+ 	if (ex1 && ex1 != ex) {
+ 		ex1 = ex;
+-		ex1->ee_len = cpu_to_le16(iblock - ee_block);
++		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ 		ext4_ext_mark_uninitialized(ex1);
+ 		ext4_ext_dirty(handle, inode, path + depth);
+ 		ex2 = &newex;
+ 	}
+ 	/*
+-	 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
+-	 * uninitialised still.
++	 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
++	 * using direct I/O, uninitialised still.
+ 	 */
+-	ex2->ee_block = cpu_to_le32(iblock);
++	ex2->ee_block = cpu_to_le32(map->m_lblk);
+ 	ext4_ext_store_pblock(ex2, newblock);
+ 	ex2->ee_len = cpu_to_le16(allocated);
+ 	ext4_ext_mark_uninitialized(ex2);
+@@ -3506,8 +3503,7 @@ fix_extent_len:
+ 
+ static int ext4_convert_unwritten_extents_dio(handle_t *handle,
+ 					      struct inode *inode,
+-					      ext4_lblk_t iblock,
+-					      unsigned int max_blocks,
++					      struct ext4_map_blocks *map,
+ 					      struct ext4_ext_path *path)
+ {
+ 	struct ext4_extent *ex;
+@@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
+ 
+ 	/* If extent is larger than requested then split is required */
+ 
+-	if (ee_block != iblock || ee_len > max_blocks) {
+-		err = ext4_split_unwritten_extents(handle, inode, path,
+-					iblock, max_blocks,
++	if (ee_block != map->m_lblk || ee_len > map->m_len) {
++		err = ext4_split_unwritten_extents(handle, inode, map, path,
+ 					EXT4_EXT_DATA_VALID);
+ 		if (err < 0)
+ 			goto out;
+ 		ext4_ext_drop_refs(path);
+-		path = ext4_ext_find_extent(inode, iblock, path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out;
+@@ -3627,10 +3622,9 @@ out:
+ 
+ static int
+ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+-			ext4_lblk_t iblock, unsigned int max_blocks,
++			struct ext4_map_blocks *map,
+ 			struct ext4_ext_path *path, int flags,
+-			unsigned int allocated, struct buffer_head *bh_result,
+-			ext4_fsblk_t newblock)
++			unsigned int allocated, ext4_fsblk_t newblock)
+ {
+ 	int ret = 0;
+ 	int err = 0;
+@@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
+ 
+ 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
+ 		  "block %llu, max_blocks %u, flags %d, allocated %u",
+-		  inode->i_ino, (unsigned long long)iblock, max_blocks,
++		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
+ 		  flags, allocated);
+ 	ext4_ext_show_leaf(inode, path);
+ 
+@@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
+ 	/* DIO get_block() before submit the IO, split the extent */
+ 	if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ 	    EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+-		ret = ext4_split_unwritten_extents(handle,
+-						inode, path, iblock,
+-						max_blocks, flags);
++		ret = ext4_split_unwritten_extents(handle, inode, map,
++						   path, flags);
+ 		if (ret <= 0)
+ 			goto out;
+ 		/*
+@@ -3674,12 +3667,11 @@ ext4_ext_handle_uninitialized_extents(ha
+ 	if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ 	    EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
+ 		ret = ext4_convert_unwritten_extents_dio(handle, inode,
+-							 iblock, max_blocks,
+-							 path);
++							 map, path);
+ 		if (ret >= 0) {
+ 			ext4_update_inode_fsync_trans(handle, inode, 1);
+-			err = check_eofblocks_fl(handle, inode, iblock, path,
+-						 max_blocks);
++			err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
++						 map->m_len);
+ 		} else
+ 			err = ret;
+ 		goto out2;
+@@ -3701,18 +3693,15 @@ ext4_ext_handle_uninitialized_extents(ha
+ 		 * the buffer head will be unmapped so that
+ 		 * a read from the block returns 0s.
+ 		 */
+-		set_buffer_unwritten(bh_result);
++		map->m_flags |= EXT4_MAP_UNWRITTEN;
+ 		goto out1;
+ 	}
+ 
+ 	/* buffered write, writepage time, convert*/
+-	ret = ext4_ext_convert_to_initialized(handle, inode,
+-						path, iblock,
+-						max_blocks,
+-						flags);
++	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
+ 	if (ret >= 0) {
+ 		ext4_update_inode_fsync_trans(handle, inode, 1);
+-		err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
++		err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
+ 		if (err < 0)
+ 			goto out2;
+ 	}
+@@ -3722,7 +3711,7 @@ out:
+ 		goto out2;
+ 	} else
+ 		allocated = ret;
+-	set_buffer_new(bh_result);
++	map->m_flags |= EXT4_MAP_NEW;
+ 	/*
+ 	 * if we allocated more blocks than requested
+ 	 * we need to make sure we unmap the extra block
+@@ -3730,11 +3719,11 @@ out:
+ 	 * unmapped later when we find the buffer_head marked
+ 	 * new.
+ 	 */
+-	if (allocated > max_blocks) {
++	if (allocated > map->m_len) {
+ 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
+-					newblock + max_blocks,
+-					allocated - max_blocks);
+-		allocated = max_blocks;
++					newblock + map->m_len,
++					allocated - map->m_len);
++		allocated = map->m_len;
+ 	}
+ 
+ 	/*
+@@ -3748,13 +3737,13 @@ out:
+ 		ext4_da_update_reserve_space(inode, allocated, 0);
+ 
+ map_out:
+-	set_buffer_mapped(bh_result);
++	map->m_flags |= EXT4_MAP_MAPPED;
+ out1:
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
++	if (allocated > map->m_len)
++		allocated = map->m_len;
+ 	ext4_ext_show_leaf(inode, path);
+-	bh_result->b_bdev = inode->i_sb->s_bdev;
+-	bh_result->b_blocknr = newblock;
++	map->m_pblk = newblock;
++	map->m_len = allocated;
+ out2:
+ 	if (path) {
+ 		ext4_ext_drop_refs(path);
+@@ -3781,10 +3770,8 @@ out2:
+  *
+  * return < 0, error case.
+  */
+-int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+-			ext4_lblk_t iblock,
+-			unsigned int max_blocks, struct buffer_head *bh_result,
+-			int flags)
++int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++			struct ext4_map_blocks *map, int flags)
+ {
+ 	struct ext4_ext_path *path = NULL;
+ 	struct ext4_extent_header *eh;
+@@ -3796,12 +3783,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	ext4_io_end_t *io = ext4_inode_aio(inode);
+ 	int set_unwritten = 0;
+ 
+-	__clear_bit(BH_New, &bh_result->b_state);
+ 	ext_debug("blocks %u/%u requested for inode %lu\n",
+-			iblock, max_blocks, inode->i_ino);
++		  map->m_lblk, map->m_len, inode->i_ino);
+ 
+ 	/* check in cache */
+-	if (ext4_ext_in_cache(inode, iblock, &newex)) {
++	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+ 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
+ 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+ 				/*
+@@ -3813,18 +3799,18 @@ int ext4_ext_get_blocks(handle_t *handle
+ 			/* we should allocate requested block */
+ 		} else {
+ 			/* block is already allocated */
+-			newblock = iblock
++			newblock = map->m_lblk
+ 				   - le32_to_cpu(newex.ee_block)
+ 				   + ext4_ext_pblock(&newex);
+ 			/* number of remaining blocks in the extent */
+ 			allocated = ext4_ext_get_actual_len(&newex) -
+-					(iblock - le32_to_cpu(newex.ee_block));
++					(map->m_lblk - le32_to_cpu(newex.ee_block));
+ 			goto out;
+ 		}
+ 	}
+ 
+ 	/* find extent for this block */
+-	path = ext4_ext_find_extent(inode, iblock, NULL);
++	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+ 	if (IS_ERR(path)) {
+ 		err = PTR_ERR(path);
+ 		path = NULL;
+@@ -3841,7 +3827,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ 		EXT4_ERROR_INODE(inode, "bad extent address "
+ 				 "iblock: %d, depth: %d pblock %lld",
+-				 iblock, depth, path[depth].p_block);
++				 map->m_lblk, depth, path[depth].p_block);
+ 		err = -EIO;
+ 		goto out2;
+ 	}
+@@ -3859,11 +3845,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 		 */
+ 		ee_len = ext4_ext_get_actual_len(ex);
+ 		/* if found extent covers block, simply return it */
+-		if (in_range(iblock, ee_block, ee_len)) {
+-			newblock = iblock - ee_block + ee_start;
++		if (in_range(map->m_lblk, ee_block, ee_len)) {
++			newblock = map->m_lblk - ee_block + ee_start;
+ 			/* number of remaining blocks in the extent */
+-			allocated = ee_len - (iblock - ee_block);
+-			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
++			allocated = ee_len - (map->m_lblk - ee_block);
++			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
+ 					ee_block, ee_len, newblock);
+ 
+ 			/*
+@@ -3875,9 +3861,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ 					ee_len, ee_start);
+ 				goto out;
+ 			}
+-			ret = ext4_ext_handle_uninitialized_extents(
+-				handle, inode, iblock, max_blocks, path,
+-				flags, allocated, bh_result, newblock);
++			ret = ext4_ext_handle_uninitialized_extents(handle,
++					inode, map, path, flags, allocated,
++					newblock);
+ 			return ret;
+ 		}
+ 	}
+@@ -3891,7 +3877,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 		 * put just found gap into cache to speed up
+ 		 * subsequent requests
+ 		 */
+-		ext4_ext_put_gap_in_cache(inode, path, iblock);
++		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+ 		goto out2;
+ 	}
+ 	/*
+@@ -3899,11 +3885,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 */
+ 
+ 	/* find neighbour allocated blocks */
+-	ar.lleft = iblock;
++	ar.lleft = map->m_lblk;
+ 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
+ 	if (err)
+ 		goto out2;
+-	ar.lright = iblock;
++	ar.lright = map->m_lblk;
+ 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
+ 	if (err)
+ 		goto out2;
+@@ -3914,26 +3900,26 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+ 	 * EXT_UNINIT_MAX_LEN.
+ 	 */
+-	if (max_blocks > EXT_INIT_MAX_LEN &&
++	if (map->m_len > EXT_INIT_MAX_LEN &&
+ 	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+-		max_blocks = EXT_INIT_MAX_LEN;
+-	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
++		map->m_len = EXT_INIT_MAX_LEN;
++	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
+ 		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+-		max_blocks = EXT_UNINIT_MAX_LEN;
++		map->m_len = EXT_UNINIT_MAX_LEN;
+ 
+-	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+-	newex.ee_block = cpu_to_le32(iblock);
+-	newex.ee_len = cpu_to_le16(max_blocks);
++	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
++	newex.ee_block = cpu_to_le32(map->m_lblk);
++	newex.ee_len = cpu_to_le16(map->m_len);
+ 	err = ext4_ext_check_overlap(inode, &newex, path);
+ 	if (err)
+ 		allocated = ext4_ext_get_actual_len(&newex);
+ 	else
+-		allocated = max_blocks;
++		allocated = map->m_len;
+ 
+ 	/* allocate new block */
+ 	ar.inode = inode;
+-	ar.goal = ext4_ext_find_goal(inode, path, iblock);
+-	ar.logical = iblock;
++	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
++	ar.logical = map->m_lblk;
+ 	ar.len = allocated;
+ 	if (S_ISREG(inode->i_mode))
+ 		ar.flags = EXT4_MB_HINT_DATA;
+@@ -3966,7 +3952,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 			set_unwritten = 1;
+ 	}
+ 
+-	err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
++	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
+ 	if (err)
+ 		goto out2;
+ 
+@@ -3997,9 +3983,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	/* previous routine could use block we allocated */
+ 	newblock = ext4_ext_pblock(&newex);
+ 	allocated = ext4_ext_get_actual_len(&newex);
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
+-	set_buffer_new(bh_result);
++	if (allocated > map->m_len)
++		allocated = map->m_len;
++	map->m_flags |= EXT4_MAP_NEW;
+ 
+ 	/*
+ 	 * Update reserved blocks/metadata blocks after successful
+@@ -4013,17 +3999,17 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	 * when it is _not_ an uninitialized extent.
+ 	 */
+ 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
+-		ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
++		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+ 		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	} else
+ 		ext4_update_inode_fsync_trans(handle, inode, 0);
+ out:
+-	if (allocated > max_blocks)
+-		allocated = max_blocks;
++	if (allocated > map->m_len)
++		allocated = map->m_len;
+ 	ext4_ext_show_leaf(inode, path);
+-	set_buffer_mapped(bh_result);
+-	bh_result->b_bdev = inode->i_sb->s_bdev;
+-	bh_result->b_blocknr = newblock;
++	map->m_flags |= EXT4_MAP_MAPPED;
++	map->m_pblk = newblock;
++	map->m_len = allocated;
+ out2:
+ 	if (path) {
+ 		ext4_ext_drop_refs(path);
+@@ -4206,7 +4192,7 @@ retry:
+ 		if (ret <= 0) {
+ #ifdef EXT4FS_DEBUG
+ 			WARN_ON(ret <= 0);
+-			printk(KERN_ERR "%s: ext4_ext_get_blocks "
++			printk(KERN_ERR "%s: ext4_ext_map_blocks "
+ 				    "returned error inode#%lu, block=%u, "
+ 				    "max_blocks=%u", __func__,
+ 				    inode->i_ino, block, max_blocks);
+@@ -4720,6 +4706,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
+ EXPORT_SYMBOL(ext4_mb_new_blocks);
+ EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
+ EXPORT_SYMBOL(ext4_mark_inode_dirty);
+-EXPORT_SYMBOL(ext4_ext_walk_space);
+ EXPORT_SYMBOL(ext4_ext_find_extent);
+ EXPORT_SYMBOL(ext4_ext_drop_refs);
+Index: linux-stage/fs/ext4/inode.c
+===================================================================
+--- linux-stage.orig/fs/ext4/inode.c	2016-07-15 12:13:05.000000000 +0300
++++ linux-stage/fs/ext4/inode.c	2016-07-15 12:15:36.000000000 +0300
+@@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
+ 	int ret;
+ 
+ 	/*
+-	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
++	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
+ 	 * moment, get_block can be called only for blocks inside i_size since
+ 	 * page cache has been already dropped and writes are blocked by
+ 	 * i_mutex. So we can safely drop the i_data_sem here.
+@@ -970,9 +970,9 @@ err_out:
+ }
+ 
+ /*
+- * The ext4_ind_get_blocks() function handles non-extents inodes
++ * The ext4_ind_map_blocks() function handles non-extents inodes
+  * (i.e., using the traditional indirect/double-indirect i_blocks
+- * scheme) for ext4_get_blocks().
++ * scheme) for ext4_map_blocks().
+  *
+  * Allocation strategy is simple: if we have to allocate something, we will
+  * have to go the whole way to leaf. So let's do it before attaching anything
+@@ -991,15 +991,14 @@ err_out:
+  * return = 0, if plain lookup failed.
+  * return < 0, error case.
+  *
+- * The ext4_ind_get_blocks() function should be called with
++ * The ext4_ind_map_blocks() function should be called with
+  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+  * blocks.
+  */
+-static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
+-			       ext4_lblk_t iblock, unsigned int maxblocks,
+-			       struct buffer_head *bh_result,
++static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
++			       struct ext4_map_blocks *map,
+ 			       int flags)
+ {
+ 	int err = -EIO;
+@@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t 
+ 
+ 	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+ 	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
+-	depth = ext4_block_to_path(inode, iblock, offsets,
++	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
+ 				   &blocks_to_boundary);
+ 
+ 	if (depth == 0)
+@@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t 
+ 	/* Simplest case - block found, no allocation needed */
+ 	if (!partial) {
+ 		first_block = le32_to_cpu(chain[depth - 1].key);
+-		clear_buffer_new(bh_result);
+ 		count++;
+ 		/*map more blocks*/
+-		while (count < maxblocks && count <= blocks_to_boundary) {
++		while (count < map->m_len && count <= blocks_to_boundary) {
+ 			ext4_fsblk_t blk;
+ 
+ 			blk = le32_to_cpu(*(chain[depth-1].p + count));
+@@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t 
+ 	/*
+ 	 * Okay, we need to do block allocation.
+ 	*/
+-	goal = ext4_find_goal(inode, iblock, partial);
++	goal = ext4_find_goal(inode, map->m_lblk, partial);
+ 
+ 	/* the number of blocks need to allocate for [d,t]indirect blocks */
+ 	indirect_blks = (chain + depth) - partial - 1;
+@@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t 
+ 	 * direct blocks to allocate for this branch.
+ 	 */
+ 	count = ext4_blks_to_allocate(partial, indirect_blks,
+-					maxblocks, blocks_to_boundary);
++				      map->m_len, blocks_to_boundary);
+ 	/*
+ 	 * Block out ext4_truncate while we alter the tree
+ 	 */
+-	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
++	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
+ 				&count, goal,
+ 				offsets + (partial - chain), partial);
+ 
+@@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t 
+ 	 * may need to return -EAGAIN upwards in the worst case.  --sct
+ 	 */
+ 	if (!err)
+-		err = ext4_splice_branch(handle, inode, iblock,
++		err = ext4_splice_branch(handle, inode, map->m_lblk,
+ 					 partial, indirect_blks, count);
+ 	if (err)
+ 		goto cleanup;
+ 
+-	set_buffer_new(bh_result);
++	map->m_flags |= EXT4_MAP_NEW;
+ 
+ 	ext4_update_inode_fsync_trans(handle, inode, 1);
+ got_it:
+-	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
++	map->m_flags |= EXT4_MAP_MAPPED;
++	map->m_pblk = le32_to_cpu(chain[depth-1].key);
++	map->m_len = count;
+ 	if (count > blocks_to_boundary)
+-		set_buffer_boundary(bh_result);
++		map->m_flags |= EXT4_MAP_BOUNDARY;
+ 	err = count;
+ 	/* Clean up and exit */
+ 	partial = chain + depth - 1;	/* the whole chain */
+@@ -1096,7 +1096,6 @@ cleanup:
+ 		brelse(partial->bh);
+ 		partial--;
+ 	}
+-	BUFFER_TRACE(bh_result, "returned");
+ out:
+ 	return err;
+ }
+@@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
+ }
+ 
+ /*
+- * The ext4_get_blocks() function tries to look up the requested blocks,
++ * The ext4_map_blocks() function tries to look up the requested blocks,
+  * and returns if the blocks are already mapped.
+  *
+  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
+  * and store the allocated blocks in the result buffer head and mark it
+  * mapped.
+  *
+- * If file type is extents based, it will call ext4_ext_get_blocks(),
+- * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
++ * If file type is extents based, it will call ext4_ext_map_blocks(),
++ * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
+  * based files
+  *
+  * On success, it returns the number of blocks being mapped or allocate.
+@@ -1312,39 +1311,33 @@ static pgoff_t ext4_num_dirty_pages(stru
+  *
+  * It returns the error in case of allocation failure.
+  */
+-int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
+-		    unsigned int max_blocks, struct buffer_head *bh,
+-		    int flags)
++int ext4_map_blocks(handle_t *handle, struct inode *inode,
++		    struct ext4_map_blocks *map, int flags)
+ {
+ 	int retval;
++
++	map->m_flags = 0;
++	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u, "
++		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
++		  (unsigned long) map->m_lblk);
+ 
+-	clear_buffer_mapped(bh);
+-	clear_buffer_unwritten(bh);
+-
+-	ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
+-		  "logical block %lu\n", inode->i_ino, flags, max_blocks,
+-		  (unsigned long)block);
+ 	/*
+ 	 * Try to see if we can get the block without requesting a new
+ 	 * file system block.
+ 	 */
+ 	down_read((&EXT4_I(inode)->i_data_sem));
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
+-				bh, 0);
++		retval = ext4_ext_map_blocks(handle, inode, map, 0);
+ 	} else {
+-		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
+-					     bh, 0);
++		retval = ext4_ind_map_blocks(handle, inode, map, 0);
+ 	}
+ 	up_read((&EXT4_I(inode)->i_data_sem));
+ 
+-	if (retval > 0 && buffer_mapped(bh)) {
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ 		int ret = check_block_validity(inode, "file system corruption",
+-					       block, bh->b_blocknr, retval);
+-		if (ret != 0) {
+-			bh->b_blocknr = 0;
++					map->m_lblk, map->m_pblk, retval);
++		if (ret != 0)
+ 			return ret;
+-		}
+ 	}
+ 
+ 	/* If it is only a block(s) look up */
+@@ -1358,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * ext4_ext_get_block() returns th create = 0
+ 	 * with buffer head unmapped.
+ 	 */
+-	if (retval > 0 && buffer_mapped(bh))
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
+ 		return retval;
+ 
+ 	/*
+@@ -1371,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
+ 	 * set on the buffer_head.
+ 	 */
+-	clear_buffer_unwritten(bh);
++	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
+ 
+ 	/*
+ 	 * New blocks allocate and/or writing to uninitialized extent
+@@ -1394,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
+ 	 * could have changed the inode type in between
+ 	 */
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
+-					      bh, flags);
++		retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ 	} else {
+-		retval = ext4_ind_get_blocks(handle, inode, block,
+-					     max_blocks, bh, flags);
++		retval = ext4_ind_map_blocks(handle, inode, map, flags);
+ 
+-		if (retval > 0 && buffer_new(bh)) {
++		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
+ 			/*
+ 			 * We allocated new blocks which will result in
+ 			 * i_data's format changing.  Force the migrate
+@@ -1423,17 +1414,39 @@ int ext4_get_blocks(handle_t *handle, st
+ 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+ 
+ 	up_write((&EXT4_I(inode)->i_data_sem));
+-	if (retval > 0 && buffer_mapped(bh)) {
++	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ 		int ret = check_block_validity(inode, "file system "
+ 					       "corruption after allocation",
+-					       block, bh->b_blocknr, retval);
++					       map->m_lblk, map->m_pblk,
++					       retval);
+ 		if (ret != 0) {
+-			bh->b_blocknr = 0;
+ 			return ret;
+ 		}
+ 	}
+ 	return retval;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++
++int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
++		    unsigned int max_blocks, struct buffer_head *bh,
++		    int flags)
++{
++	struct ext4_map_blocks map;
++	int ret;
++
++	map.m_lblk = block;
++	map.m_len = max_blocks;
++
++	ret = ext4_map_blocks(handle, inode, &map, flags);
++	if (ret < 0)
++		return ret;
++
++	bh->b_blocknr = map.m_pblk;
++	bh->b_size = inode->i_sb->s_blocksize * map.m_len;
++	bh->b_bdev = inode->i_sb->s_bdev;
++	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++	return ret;
++}
+ 
+ /* Maximum number of blocks we map for direct IO at once. */
+ #define DIO_MAX_BLOCKS 4096
diff --git a/ldiskfs/kernel_patches/patches/rhel6.6/ext4_s_max_ext_tree_depth.patch b/ldiskfs/kernel_patches/patches/rhel6.6/ext4_s_max_ext_tree_depth.patch
index 96c9b89..b14d599 100644
--- a/ldiskfs/kernel_patches/patches/rhel6.6/ext4_s_max_ext_tree_depth.patch
+++ b/ldiskfs/kernel_patches/patches/rhel6.6/ext4_s_max_ext_tree_depth.patch
@@ -5,11 +5,11 @@ current and unsafe implementation with ext4_ext_path[] array
 re-[sizing,allocation], even with more recent and related patches
 that will be integrated in more recent Kernels.
 
-Index: linux-2.6.32-504.el6.x86_64/fs/ext4/ext4.h
+Index: linux-stage/fs/ext4/ext4.h
 ===================================================================
---- linux-2.6.32-504.el6.x86_64.orig/fs/ext4/ext4.h
-+++ linux-2.6.32-504.el6.x86_64/fs/ext4/ext4.h
-@@ -1147,6 +1147,9 @@
+--- linux-stage.orig/fs/ext4/ext4.h	2016-07-15 10:55:51.000000000 +0300
++++ linux-stage/fs/ext4/ext4.h	2016-07-15 10:56:19.000000000 +0300
+@@ -1153,6 +1153,9 @@ struct ext4_sb_info {
  	unsigned long s_ext_extents;
  #endif
  
@@ -19,32 +19,11 @@ Index: linux-2.6.32-504.el6.x86_64/fs/ext4/ext4.h
  	/* for buddy allocator */
  	struct ext4_group_info ***s_group_info;
  	struct inode *s_buddy_cache;
-Index: linux-2.6.32-504.el6.x86_64/fs/ext4/super.c
+Index: linux-stage/fs/ext4/extents.c
 ===================================================================
---- linux-2.6.32-504.el6.x86_64.orig/fs/ext4/super.c
-+++ linux-2.6.32-504.el6.x86_64/fs/ext4/super.c
-@@ -3529,6 +3529,8 @@
- 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
- 			goto failed_mount3;
- 
-+	ext4_ext_init(sb); /* needed before using extent-mapped journal */
-+
- 	/*
- 	 * The first inode we look at is the journal inode.  Don't try
- 	 * root first: it may be modified in the journal!
-@@ -3722,7 +3724,6 @@
- 		goto failed_mount4a;
- 	}
- 
--	ext4_ext_init(sb);
- 	err = ext4_mb_init(sb, needs_recovery);
- 	if (err) {
- 		ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
-Index: linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
-===================================================================
---- linux-2.6.32-504.el6.x86_64.orig/fs/ext4/extents.c
-+++ linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
-@@ -699,8 +699,9 @@
+--- linux-stage.orig/fs/ext4/extents.c	2016-07-15 10:55:51.000000000 +0300
++++ linux-stage/fs/ext4/extents.c	2016-07-15 10:56:19.000000000 +0300
+@@ -698,8 +698,9 @@ ext4_ext_find_extent(struct inode *inode
  
  	/* account possible depth increase */
  	if (!path) {
@@ -56,7 +35,7 @@ Index: linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
  		if (!path)
  			return ERR_PTR(-ENOMEM);
  		alloc = 1;
-@@ -1915,11 +1916,8 @@
+@@ -1907,11 +1908,8 @@ static int ext4_fill_fiemap_extents(stru
  		/* find extent for this block */
  		down_read(&EXT4_I(inode)->i_data_sem);
  
@@ -70,19 +49,17 @@ Index: linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
  
  		path = ext4_ext_find_extent(inode, block, path);
  		if (IS_ERR(path)) {
-@@ -2664,8 +2662,9 @@
+@@ -2656,7 +2654,8 @@ again:
  			path[k].p_block =
  				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
  	} else {
 -		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
--			       GFP_NOFS);
 +		path = kzalloc(sizeof(struct ext4_ext_path) *
 +			       EXT4_SB(inode->i_sb)->s_max_ext_tree_depth,
-+			       GFP_NOFS);
+ 			       GFP_NOFS);
  		if (path == NULL) {
  			ext4_journal_stop(handle);
- 			return -ENOMEM;
-@@ -2790,13 +2789,15 @@
+@@ -2781,13 +2780,15 @@ out:
   */
  void ext4_ext_init(struct super_block *sb)
  {
@@ -100,7 +77,7 @@ Index: linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
  #ifdef AGGRESSIVE_TEST
  		printk(", aggressive tests");
  #endif
-@@ -2805,14 +2806,35 @@
+@@ -2796,14 +2797,35 @@ void ext4_ext_init(struct super_block *s
  #endif
  #ifdef EXTENTS_STATS
  		printk(", stats");
@@ -140,17 +117,24 @@ Index: linux-2.6.32-504.el6.x86_64/fs/ext4/extents.c
  	}
  }
  
-@@ -4614,11 +4636,8 @@
- 			break;
- 		}
- 
--		if (ext_depth(inode) != depth) {
--			/* depth was changed. we have to realloc path */
--			kfree(path);
--			path = NULL;
--		}
-+		/* path of max possible depth will be allocated during
-+		 * first pass, so its space can be re-used for each loop */
+Index: linux-stage/fs/ext4/super.c
+===================================================================
+--- linux-stage.orig/fs/ext4/super.c	2016-07-15 10:55:51.000000000 +0300
++++ linux-stage/fs/ext4/super.c	2016-07-15 10:56:19.000000000 +0300
+@@ -3529,6 +3529,8 @@ static int ext4_fill_super(struct super_
+ 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
+ 			goto failed_mount3;
  
- 		block = cbex.ec_block + cbex.ec_len;
++	ext4_ext_init(sb); /* needed before using extent-mapped journal */
++
+ 	/*
+ 	 * The first inode we look at is the journal inode.  Don't try
+ 	 * root first: it may be modified in the journal!
+@@ -3722,7 +3724,6 @@ no_journal:
+ 		goto failed_mount4a;
  	}
+ 
+-	ext4_ext_init(sb);
+ 	err = ext4_mb_init(sb, needs_recovery);
+ 	if (err) {
+ 		ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.4.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.4.series
index daeda62..e057b99 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.4.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.4.series
@@ -2,7 +2,6 @@ rhel6.3/ext4-use-vzalloc-in-ext4_fill_flex_info.patch
 rhel6.3/ext4-introduce-ext4_kvmalloc-ext4_kzalloc-and-ext4_kvfree.patch
 rhel6.3/ext4-add-missing-kfree-on-error-return-path-in-add_new_gdb.patch
 rhel6.3/ext4-use-ext4_kvzalloc-ext4_kvmalloc-for-s_group_desc-and-s_group_info.patch
-rhel6.3/ext4-map_inode_page-2.6.18.patch
 rhel6.3/export-ext4-2.6.patch
 rhel6.3/ext4-remove-cond_resched-calls.patch
 rhel6.3/ext4-nlink-2.6.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.5.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.5.series
index d656b85..42434ef 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.5.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.5.series
@@ -2,7 +2,6 @@ rhel6.3/ext4-use-vzalloc-in-ext4_fill_flex_info.patch
 rhel6.3/ext4-introduce-ext4_kvmalloc-ext4_kzalloc-and-ext4_kvfree.patch
 rhel6.3/ext4-add-missing-kfree-on-error-return-path-in-add_new_gdb.patch
 rhel6.3/ext4-use-ext4_kvzalloc-ext4_kvmalloc-for-s_group_desc-and-s_group_info.patch
-rhel6.3/ext4-map_inode_page-2.6.18.patch
 rhel6.3/export-ext4-2.6.patch
 rhel6.3/ext4-remove-cond_resched-calls.patch
 rhel6.3/ext4-nlink-2.6.patch
@@ -28,8 +27,7 @@ rhel6.3/ext4-large-eas.patch
 rhel6.3/ext4-disable-mb-cache.patch
 rhel6.3/ext4-nocmtime-2.6.patch
 rhel6.3/ext4-journal-callback.patch
-rhel6.5/ext4-ext-walk-space.patch
-rhel6.3/ext4-store-tree-generation-at-find.patch
+rhel6.5/ext4-add-new-abstraction-ext4_map_blocks.patch
 rhel6.3/ext4-large-dir.patch
 rhel6.3/ext4-pdirop.patch
 rhel6.4/ext4-extra-isize.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.6.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.6.series
index e6fae81..9d6b172 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.6.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.6.series
@@ -2,7 +2,6 @@ rhel6.3/ext4-use-vzalloc-in-ext4_fill_flex_info.patch
 rhel6.3/ext4-introduce-ext4_kvmalloc-ext4_kzalloc-and-ext4_kvfree.patch
 rhel6.3/ext4-add-missing-kfree-on-error-return-path-in-add_new_gdb.patch
 rhel6.3/ext4-use-ext4_kvzalloc-ext4_kvmalloc-for-s_group_desc-and-s_group_info.patch
-rhel6.3/ext4-map_inode_page-2.6.18.patch
 rhel6.3/export-ext4-2.6.patch
 rhel6.3/ext4-remove-cond_resched-calls.patch
 rhel6.3/ext4-nlink-2.6.patch
@@ -27,8 +26,7 @@ rhel6.3/ext4-large-eas.patch
 rhel6.3/ext4-disable-mb-cache.patch
 rhel6.3/ext4-nocmtime-2.6.patch
 rhel6.3/ext4-journal-callback.patch
-rhel6.5/ext4-ext-walk-space.patch
-rhel6.3/ext4-store-tree-generation-at-find.patch
+rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch
 rhel6.3/ext4-large-dir.patch
 rhel6.3/ext4-pdirop.patch
 rhel6.4/ext4-extra-isize.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.7.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.7.series
index c25d944..67f4bde 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.7.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.7.series
@@ -2,7 +2,6 @@ rhel6.3/ext4-use-vzalloc-in-ext4_fill_flex_info.patch
 rhel6.3/ext4-introduce-ext4_kvmalloc-ext4_kzalloc-and-ext4_kvfree.patch
 rhel6.3/ext4-add-missing-kfree-on-error-return-path-in-add_new_gdb.patch
 rhel6.3/ext4-use-ext4_kvzalloc-ext4_kvmalloc-for-s_group_desc-and-s_group_info.patch
-rhel6.3/ext4-map_inode_page-2.6.18.patch
 rhel6.3/export-ext4-2.6.patch
 rhel6.3/ext4-remove-cond_resched-calls.patch
 rhel6.3/ext4-nlink-2.6.patch
@@ -27,8 +26,7 @@ rhel6.3/ext4-large-eas.patch
 rhel6.3/ext4-disable-mb-cache.patch
 rhel6.3/ext4-nocmtime-2.6.patch
 rhel6.3/ext4-journal-callback.patch
-rhel6.5/ext4-ext-walk-space.patch
-rhel6.3/ext4-store-tree-generation-at-find.patch
+rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch
 rhel6.3/ext4-large-dir.patch
 rhel6.3/ext4-pdirop.patch
 rhel6.4/ext4-extra-isize.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.8.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.8.series
index 9e31c93..3c4ab71 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.8.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-2.6-rhel6.8.series
@@ -2,7 +2,6 @@ rhel6.3/ext4-use-vzalloc-in-ext4_fill_flex_info.patch
 rhel6.3/ext4-introduce-ext4_kvmalloc-ext4_kzalloc-and-ext4_kvfree.patch
 rhel6.3/ext4-add-missing-kfree-on-error-return-path-in-add_new_gdb.patch
 rhel6.3/ext4-use-ext4_kvzalloc-ext4_kvmalloc-for-s_group_desc-and-s_group_info.patch
-rhel6.3/ext4-map_inode_page-2.6.18.patch
 rhel6.3/export-ext4-2.6.patch
 rhel6.3/ext4-remove-cond_resched-calls.patch
 rhel6.3/ext4-nlink-2.6.patch
@@ -27,8 +26,7 @@ rhel6.3/ext4-large-eas.patch
 rhel6.3/ext4-disable-mb-cache.patch
 rhel6.3/ext4-nocmtime-2.6.patch
 rhel6.8/ext4-journal-callback.patch
-rhel6.5/ext4-ext-walk-space.patch
-rhel6.3/ext4-store-tree-generation-at-find.patch
+rhel6.6/ext4-add-new-abstraction-ext4_map_blocks.patch
 rhel6.3/ext4-large-dir.patch
 rhel6.8/ext4-pdirop.patch
 rhel6.4/ext4-extra-isize.patch
-- 
2.9.0
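
Note (illustrative only, not part of the patch): under the new API a
caller fills the small struct ext4_map_blocks instead of a struct
buffer_head. A minimal sketch of a plain lookup, assuming the patched
headers above; "lblk", "len" and use_blocks() are placeholders for the
caller's own values and consumer:

	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;	/* first logical block to look up */
	map.m_len = len;	/* number of blocks requested */

	/* flags == 0: lookup only, no block allocation */
	ret = ext4_map_blocks(handle, inode, &map, 0);
	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
		/* ret blocks are mapped starting at physical map.m_pblk */
		use_blocks(map.m_pblk, ret);

The compatibility wrapper ext4_get_blocks() added at the end of inode.c
does exactly this and copies the result back into the buffer_head, so
callers that still pass a buffer_head keep working unchanged.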