Author: adamg                        Date: Mon Feb 23 22:19:27 2009 GMT
Module: SOURCES                      Tag: LINUX_2_6
---- Log message:
- updated from the for-stable branch
---- Files affected:
SOURCES:
   kernel-ext4.patch (1.1.2.2 -> 1.1.2.3)

---- Diffs:

================================================================
Index: SOURCES/kernel-ext4.patch
diff -u SOURCES/kernel-ext4.patch:1.1.2.2 SOURCES/kernel-ext4.patch:1.1.2.3
--- SOURCES/kernel-ext4.patch:1.1.2.2 Mon Feb 2 21:49:38 2009
+++ SOURCES/kernel-ext4.patch Mon Feb 23 23:19:21 2009
@@ -1,2166 +1,370 @@
-http://git.kernel.org/?p=linux/kernel/git/tytso/ext4.git;a=commitdiff;h=e041093f35eee3bc7093788c3753bc4493e62d75
+Aneesh Kumar K.V (3):
+      ext4: Fix lockdep warning
+      ext4: Initialize preallocation list_head's properly
+      ext4: Implement range_cyclic in ext4_da_writepages instead of write_cache_pages
-Aneesh Kumar K.V (14):
-      ext4: Fix the delalloc writepages to allocate blocks at the right offset.
-      ext4: avoid ext4_error when mounting a fs with a single bg
-      ext4: Don't overwrite allocation_context ac_status
-      ext4: Add blocks added during resize to bitmap
-      ext4: Use EXT4_GROUP_INFO_NEED_INIT_BIT during resize
-      ext4: cleanup mballoc header files
-      ext4: don't use blocks freed but not yet committed in buddy cache init
-      ext4: Fix race between read_block_bitmap() and mark_diskspace_used()
-      ext4: Fix the race between read_inode_bitmap() and ext4_new_inode()
-      ext4: Use new buffer_head flag to check uninit group bitmaps initialization
-      ext4: mark the blocks/inode bitmap beyond end of group as used
-      ext4: Don't allow new groups to be added during block allocation
-      ext4: Init the complete page while building buddy cache
-      ext4: Fix s_dirty_blocks_counter if block allocation failed with nodelalloc
+Dan Carpenter (1):
+      ext4: Fix NULL dereference in ext4_ext_migrate()'s error handling
-Mark Fasheh (1):
-      jbd2: Add BH_JBDPrivateStart
+Jan Kara (3):
+      jbd2: Fix return value of jbd2_journal_start_commit()
+      Revert "ext4: wait on all pending commits in ext4_sync_fs()"
+      jbd2: Avoid possible NULL dereference in jbd2_journal_begin_ordered_truncate()
-Theodore Ts'o (9):
-      ext4: Add support for non-native signed/unsigned htree hash algorithms
-      ext4: tone down ext4_da_writepages warnings
-      jbd2: Add barrier not supported test to journal_wait_on_commit_record
-      ext4: Add sanity checks for the superblock before mounting the filesystem
-      ext4: only use i_size_high for regular files
-      ext4: Add sanity check to make_indexed_dir
-      ext3: Add sanity check to make_indexed_dir
-      jbd2: On a __journal_expect() assertion failure printk "JBD2", not "EXT3-fs"
-      ext4: Initialize the new group descriptor when resizing the filesystem
+Wei Yongjun (1):
+      ext4: Fix to read empty directory blocks correctly in 64k
-Yasunori Goto (1):
-      ext4: Widen type of ext4_sb_info.s_mb_maxs[]
-
-diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index db35cfd..49ae5e4 100644
---- a/fs/ext4/balloc.c
-+++ b/fs/ext4/balloc.c
-@@ -20,6 +20,7 @@
- #include "ext4.h"
- #include "ext4_jbd2.h"
- #include "group.h"
-+#include "mballoc.h"
- 
- /*
- * balloc.c contains the blocks allocation and deallocation routines
-@@ -319,20 +320,41 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
- block_group, bitmap_blk);
- return NULL;
- }
-- if (buffer_uptodate(bh) &&
-- !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
-+
- if (bitmap_uptodate(bh))
- return bh;
- 
- lock_buffer(bh);
-+ if (bitmap_uptodate(bh)) {
-+ unlock_buffer(bh);
-+ return bh;
-+ }
- spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
- ext4_init_block_bitmap(sb, bh, block_group, desc);
-+
set_bitmap_uptodate(bh); - set_buffer_uptodate(bh); - unlock_buffer(bh); - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); - return bh; - } - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); -+ if (buffer_uptodate(bh)) { -+ /* -+ * if not uninit if bh is uptodate, -+ * bitmap is also uptodate -+ */ -+ set_bitmap_uptodate(bh); -+ unlock_buffer(bh); -+ return bh; -+ } -+ /* -+ * submit the buffer_head for read. We can -+ * safely mark the bitmap as uptodate now. -+ * We do it here so the bitmap uptodate bit -+ * get set with buffer lock held. -+ */ -+ set_bitmap_uptodate(bh); - if (bh_submit_read(bh) < 0) { - put_bh(bh); - ext4_error(sb, __func__, -@@ -350,62 +372,44 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) - } - - /** -- * ext4_free_blocks_sb() -- Free given blocks and update quota -+ * ext4_add_groupblocks() -- Add given blocks to an existing group - * @handle: handle to this transaction - * @sb: super block -- * @block: start physcial block to free -+ * @block: start physcial block to add to the block group - * @count: number of blocks to free -- * @pdquot_freed_blocks: pointer to quota - * -- * XXX This function is only used by the on-line resizing code, which -- * should probably be fixed up to call the mballoc variant. There -- * this needs to be cleaned up later; in fact, I'm not convinced this -- * is 100% correct in the face of the mballoc code. The online resizing -- * code needs to be fixed up to more tightly (and correctly) interlock -- * with the mballoc code. -+ * This marks the blocks as free in the bitmap. We ask the -+ * mballoc to reload the buddy after this by setting group -+ * EXT4_GROUP_INFO_NEED_INIT_BIT flag - */ --void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, -- ext4_fsblk_t block, unsigned long count, -- unsigned long *pdquot_freed_blocks) -+void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, -+ ext4_fsblk_t block, unsigned long count) - { - struct buffer_head *bitmap_bh = NULL; - struct buffer_head *gd_bh; - ext4_group_t block_group; - ext4_grpblk_t bit; - unsigned long i; -- unsigned long overflow; - struct ext4_group_desc *desc; - struct ext4_super_block *es; - struct ext4_sb_info *sbi; - int err = 0, ret; -- ext4_grpblk_t group_freed; -+ ext4_grpblk_t blocks_freed; -+ struct ext4_group_info *grp; - -- *pdquot_freed_blocks = 0; - sbi = EXT4_SB(sb); - es = sbi->s_es; -- if (block < le32_to_cpu(es->s_first_data_block) || -- block + count < block || -- block + count > ext4_blocks_count(es)) { -- ext4_error(sb, "ext4_free_blocks", -- "Freeing blocks not in datazone - " -- "block = %llu, count = %lu", block, count); -- goto error_return; -- } -- -- ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1); -+ ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); - --do_more: -- overflow = 0; - ext4_get_group_no_and_offset(sb, block, &block_group, &bit); -+ grp = ext4_get_group_info(sb, block_group); - /* - * Check to see if we are freeing blocks across a group - * boundary. 
- */ - if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { -- overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); -- count -= overflow; -+ goto error_return; - } -- brelse(bitmap_bh); - bitmap_bh = ext4_read_block_bitmap(sb, block_group); - if (!bitmap_bh) - goto error_return; -@@ -418,18 +422,17 @@ do_more: - in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || - in_range(block + count - 1, ext4_inode_table(sb, desc), - sbi->s_itb_per_group)) { -- ext4_error(sb, "ext4_free_blocks", -- "Freeing blocks in system zones - " -+ ext4_error(sb, __func__, -+ "Adding blocks in system zones - " - "Block = %llu, count = %lu", - block, count); - goto error_return; - } - - /* -- * We are about to start releasing blocks in the bitmap, -+ * We are about to add blocks to the bitmap, - * so we need undo access. - */ -- /* @@@ check errors */ - BUFFER_TRACE(bitmap_bh, "getting undo access"); - err = ext4_journal_get_undo_access(handle, bitmap_bh); - if (err) -@@ -444,90 +447,42 @@ do_more: - err = ext4_journal_get_write_access(handle, gd_bh); - if (err) - goto error_return; -- -- jbd_lock_bh_state(bitmap_bh); -- -- for (i = 0, group_freed = 0; i < count; i++) { -- /* -- * An HJ special. This is expensive... -- */ --#ifdef CONFIG_JBD2_DEBUG -- jbd_unlock_bh_state(bitmap_bh); -- { -- struct buffer_head *debug_bh; -- debug_bh = sb_find_get_block(sb, block + i); -- if (debug_bh) { -- BUFFER_TRACE(debug_bh, "Deleted!"); -- if (!bh2jh(bitmap_bh)->b_committed_data) -- BUFFER_TRACE(debug_bh, -- "No commited data in bitmap"); -- BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap"); -- __brelse(debug_bh); -- } -- } -- jbd_lock_bh_state(bitmap_bh); --#endif -- if (need_resched()) { -- jbd_unlock_bh_state(bitmap_bh); -- cond_resched(); -- jbd_lock_bh_state(bitmap_bh); -- } -- /* @@@ This prevents newly-allocated data from being -- * freed and then reallocated within the same -- * transaction. -- * -- * Ideally we would want to allow that to happen, but to -- * do so requires making jbd2_journal_forget() capable of -- * revoking the queued write of a data block, which -- * implies blocking on the journal lock. *forget() -- * cannot block due to truncate races. -- * -- * Eventually we can fix this by making jbd2_journal_forget() -- * return a status indicating whether or not it was able -- * to revoke the buffer. On successful revoke, it is -- * safe not to set the allocation bit in the committed -- * bitmap, because we know that there is no outstanding -- * activity on the buffer any more and so it is safe to -- * reallocate it. -- */ -- BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); -- J_ASSERT_BH(bitmap_bh, -- bh2jh(bitmap_bh)->b_committed_data != NULL); -- ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i, -- bh2jh(bitmap_bh)->b_committed_data); -- -- /* -- * We clear the bit in the bitmap after setting the committed -- * data bit, because this is the reverse order to that which -- * the allocator uses. 
-- */ -+ /* -+ * make sure we don't allow a parallel init on other groups in the -+ * same buddy cache -+ */ -+ down_write(&grp->alloc_sem); -+ for (i = 0, blocks_freed = 0; i < count; i++) { - BUFFER_TRACE(bitmap_bh, "clear bit"); - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group), - bit + i, bitmap_bh->b_data)) { -- jbd_unlock_bh_state(bitmap_bh); - ext4_error(sb, __func__, - "bit already cleared for block %llu", - (ext4_fsblk_t)(block + i)); -- jbd_lock_bh_state(bitmap_bh); - BUFFER_TRACE(bitmap_bh, "bit already cleared"); - } else { -- group_freed++; -+ blocks_freed++; - } - } -- jbd_unlock_bh_state(bitmap_bh); -- - spin_lock(sb_bgl_lock(sbi, block_group)); -- le16_add_cpu(&desc->bg_free_blocks_count, group_freed); -+ le16_add_cpu(&desc->bg_free_blocks_count, blocks_freed); - desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); - spin_unlock(sb_bgl_lock(sbi, block_group)); -- percpu_counter_add(&sbi->s_freeblocks_counter, count); -+ percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed); - - if (sbi->s_log_groups_per_flex) { - ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - spin_lock(sb_bgl_lock(sbi, flex_group)); -- sbi->s_flex_groups[flex_group].free_blocks += count; -+ sbi->s_flex_groups[flex_group].free_blocks += blocks_freed; - spin_unlock(sb_bgl_lock(sbi, flex_group)); - } -+ /* -+ * request to reload the buddy with the -+ * new bitmap information -+ */ -+ set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); -+ ext4_mb_update_group_info(grp, blocks_freed); -+ up_write(&grp->alloc_sem); - - /* We dirtied the bitmap block */ - BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); -@@ -536,15 +491,10 @@ do_more: - /* And the group descriptor block */ - BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); - ret = ext4_journal_dirty_metadata(handle, gd_bh); -- if (!err) err = ret; -- *pdquot_freed_blocks += group_freed; -- -- if (overflow && !err) { -- block += count; -- count = overflow; -- goto do_more; -- } -+ if (!err) -+ err = ret; - sb->s_dirt = 1; -+ - error_return: - brelse(bitmap_bh); - ext4_std_error(sb, err); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index b0537c8..dfccef5 100644 +index dfccef5..0b0c0fa 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h -@@ -19,6 +19,7 @@ - #include <linux/types.h> - #include <linux/blkdev.h> - #include <linux/magic.h> -+#include <linux/jbd2.h> - #include "ext4_i.h" - - /* -@@ -891,6 +892,9 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len) - #define DX_HASH_LEGACY 0 - #define DX_HASH_HALF_MD4 1 - #define DX_HASH_TEA 2 -+#define DX_HASH_LEGACY_UNSIGNED 3 -+#define DX_HASH_HALF_MD4_UNSIGNED 4 -+#define DX_HASH_TEA_UNSIGNED 5 - - #ifdef __KERNEL__ - -@@ -1006,9 +1010,8 @@ extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); - extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); - extern void ext4_free_blocks(handle_t *handle, struct inode *inode, - ext4_fsblk_t block, unsigned long count, int metadata); --extern void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, -- ext4_fsblk_t block, unsigned long count, -- unsigned long *pdquot_freed_blocks); -+extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, -+ ext4_fsblk_t block, unsigned long count); - extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *); - extern void ext4_check_blocks_bitmap(struct super_block *); - extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, -@@ -1054,12 +1057,13 @@ extern int __init 
init_ext4_mballoc(void); - extern void exit_ext4_mballoc(void); - extern void ext4_mb_free_blocks(handle_t *, struct inode *, - unsigned long, unsigned long, int, unsigned long *); --extern int ext4_mb_add_more_groupinfo(struct super_block *sb, -+extern int ext4_mb_add_groupinfo(struct super_block *sb, - ext4_group_t i, struct ext4_group_desc *desc); - extern void ext4_mb_update_group_info(struct ext4_group_info *grp, - ext4_grpblk_t add); -- -- -+extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t); -+extern void ext4_mb_put_buddy_cache_lock(struct super_block *, -+ ext4_group_t, int); - /* inode.c */ - int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, - struct buffer_head *bh, ext4_fsblk_t blocknr); -@@ -1184,8 +1188,11 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es, - - static inline loff_t ext4_isize(struct ext4_inode *raw_inode) +@@ -862,7 +862,7 @@ static inline unsigned ext4_rec_len_from_disk(__le16 dlen) { -- return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | -- le32_to_cpu(raw_inode->i_size_lo); -+ if (S_ISREG(le16_to_cpu(raw_inode->i_mode))) -+ return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | -+ le32_to_cpu(raw_inode->i_size_lo); -+ else -+ return (loff_t) le32_to_cpu(raw_inode->i_size_lo); - } + unsigned len = le16_to_cpu(dlen); - static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) -@@ -1283,6 +1290,24 @@ extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, - sector_t block, unsigned long max_blocks, - struct buffer_head *bh, int create, - int extend_disksize, int flag); -+ -+/* -+ * Add new method to test wether block and inode bitmaps are properly -+ * initialized. With uninit_bg reading the block from disk is not enough -+ * to mark the bitmap uptodate. 
We need to also zero-out the bitmap -+ */ -+#define BH_BITMAP_UPTODATE BH_JBDPrivateStart -+ -+static inline int bitmap_uptodate(struct buffer_head *bh) -+{ -+ return (buffer_uptodate(bh) && -+ test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state)); -+} -+static inline void set_bitmap_uptodate(struct buffer_head *bh) -+{ -+ set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); -+} -+ - #endif /* __KERNEL__ */ - - #endif /* _EXT4_H */ -diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h -index 445fde6..f00f112 100644 ---- a/fs/ext4/ext4_sb.h -+++ b/fs/ext4/ext4_sb.h -@@ -57,6 +57,7 @@ struct ext4_sb_info { - u32 s_next_generation; - u32 s_hash_seed[4]; - int s_def_hash_version; -+ int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ - struct percpu_counter s_freeblocks_counter; - struct percpu_counter s_freeinodes_counter; - struct percpu_counter s_dirs_counter; -@@ -101,7 +102,8 @@ struct ext4_sb_info { - spinlock_t s_reserve_lock; - spinlock_t s_md_lock; - tid_t s_last_transaction; -- unsigned short *s_mb_offsets, *s_mb_maxs; -+ unsigned short *s_mb_offsets; -+ unsigned int *s_mb_maxs; - - /* tunables */ - unsigned long s_stripe; -diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c -index 556ca8e..ac8f168 100644 ---- a/fs/ext4/hash.c -+++ b/fs/ext4/hash.c -@@ -35,23 +35,71 @@ static void TEA_transform(__u32 buf[4], __u32 const in[]) - - - /* The old legacy hash */ --static __u32 dx_hack_hash(const char *name, int len) -+static __u32 dx_hack_hash_unsigned(const char *name, int len) - { -- __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; -+ __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; -+ const unsigned char *ucp = (const unsigned char *) name; -+ -+ while (len--) { -+ hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); -+ -+ if (hash & 0x80000000) -+ hash -= 0x7fffffff; -+ hash1 = hash0; -+ hash0 = hash; -+ } -+ return hash0 << 1; -+} -+ -+static __u32 dx_hack_hash_signed(const char *name, int len) -+{ -+ __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; -+ const signed char *scp = (const signed char *) name; -+ - while (len--) { -- __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373)); -+ hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); - -- if (hash & 0x80000000) hash -= 0x7fffffff; -+ if (hash & 0x80000000) -+ hash -= 0x7fffffff; - hash1 = hash0; - hash0 = hash; - } -- return (hash0 << 1); -+ return hash0 << 1; -+} -+ -+static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num) -+{ -+ __u32 pad, val; -+ int i; -+ const signed char *scp = (const signed char *) msg; -+ -+ pad = (__u32)len | ((__u32)len << 8); -+ pad |= pad << 16; -+ -+ val = pad; -+ if (len > num*4) -+ len = num * 4; -+ for (i = 0; i < len; i++) { -+ if ((i % 4) == 0) -+ val = pad; -+ val = ((int) scp[i]) + (val << 8); -+ if ((i % 4) == 3) { -+ *buf++ = val; -+ val = pad; -+ num--; -+ } -+ } -+ if (--num >= 0) -+ *buf++ = val; -+ while (--num >= 0) -+ *buf++ = pad; +- if (len == EXT4_MAX_REC_LEN) ++ if (len == EXT4_MAX_REC_LEN || len == 0) + return 1 << 16; + return len; } - --static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) -+static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) - { - __u32 pad, val; - int i; -+ const unsigned char *ucp = (const unsigned char *) msg; - - pad = (__u32)len | ((__u32)len << 8); - pad |= pad << 16; -@@ -62,7 +110,7 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) - for (i = 0; i < len; i++) { - if ((i % 4) == 0) - val = pad; -- val = msg[i] + (val << 8); -+ val = ((int) ucp[i]) + (val << 8); - if ((i % 4) == 
3) {
- *buf++ = val;
- val = pad;
-@@ -95,6 +143,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
- const char *p;
- int i;
- __u32 in[8], buf[4];
-+ void (*str2hashbuf)(const char *, int, __u32 *, int) =
-+ str2hashbuf_signed;
- 
- /* Initialize the default seed for the hash checksum functions */
- buf[0] = 0x67452301;
-@@ -113,13 +163,18 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
- }
- 
- switch (hinfo->hash_version) {
-+ case DX_HASH_LEGACY_UNSIGNED:
-+ hash = dx_hack_hash_unsigned(name, len);
-+ break;
- case DX_HASH_LEGACY:
-- hash = dx_hack_hash(name, len);
-+ hash = dx_hack_hash_signed(name, len);
- break;
-+ case DX_HASH_HALF_MD4_UNSIGNED:
-+ str2hashbuf = str2hashbuf_unsigned;
- case DX_HASH_HALF_MD4:
- p = name;
- while (len > 0) {
-- str2hashbuf(p, len, in, 8);
-+ (*str2hashbuf)(p, len, in, 8);
- half_md4_transform(buf, in);
- len -= 32;
- p += 32;
-@@ -127,10 +182,12 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
- minor_hash = buf[2];
- hash = buf[1];
- break;
-+ case DX_HASH_TEA_UNSIGNED:
-+ str2hashbuf = str2hashbuf_unsigned;
- case DX_HASH_TEA:
- p = name;
- while (len > 0) {
-- str2hashbuf(p, len, in, 4);
-+ (*str2hashbuf)(p, len, in, 4);
- TEA_transform(buf, in);
- len -= 16;
- p += 16;
-diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
-index 2a117e2..b363c49 100644
---- a/fs/ext4/ialloc.c
-+++ b/fs/ext4/ialloc.c
-@@ -84,7 +84,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
- }
- 
- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-- mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
-+ mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/SOURCES/kernel-ext4.patch?r1=1.1.2.2&r2=1.1.2.3&f=u

_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
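A note on the hash.c hunk quoted above: the legacy htree hash mixed plain "char" bytes, so the on-disk hash of a directory entry depended on whether the build architecture treats char as signed or unsigned; the DX_HASH_LEGACY_UNSIGNED / DX_HASH_HALF_MD4_UNSIGNED / DX_HASH_TEA_UNSIGNED values added by this series let ext4 keep both conventions readable. The code below is only an illustrative userspace sketch, not part of the patch: it copies the two dx_hack_hash variants from the hunk and shows that they diverge for a name containing a byte >= 0x80.

/* Standalone sketch (not from the patch): compare the signed and unsigned
 * legacy htree hash variants copied from the hash.c hunk above. */
#include <stdio.h>

typedef unsigned int u32;

static u32 dx_hack_hash_unsigned(const char *name, int len)
{
	u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const unsigned char *ucp = (const unsigned char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

static u32 dx_hack_hash_signed(const char *name, int len)
{
	u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const signed char *scp = (const signed char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

int main(void)
{
	/* A name with a byte above 0x7f; for pure-ASCII names both agree. */
	const char name[] = "f\xe9te";
	int len = (int) sizeof(name) - 1;

	printf("legacy unsigned: 0x%08x\n", dx_hack_hash_unsigned(name, len));
	printf("legacy signed:   0x%08x\n", dx_hack_hash_signed(name, len));
	return 0;
}

For ASCII-only names the two functions return identical values; they differ only for high-bit bytes, which is why DX_HASH_LEGACY stays mapped to the signed variant while the *_UNSIGNED variants cover filesystems created on platforms where plain char is unsigned (for example ARM or PowerPC).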
