Commit 7ce1418f authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (31 commits)
  dquot: Detect partial write error to quota file in write_blk() and add printk_ratelimit for quota error messages
  ocfs2: Fix lock inversion in quotas during umount
  ocfs2: Use __dquot_transfer to avoid lock inversion
  ocfs2: Fix NULL pointer deref when writing local dquot
  ocfs2: Fix estimate of credits needed for quota allocation
  ocfs2: Fix quota locking
  ocfs2: Avoid unnecessary block mapping when refreshing quota info
  ocfs2: Do not map blocks from local quota file on each write
  quota: Refactor dquot_transfer code so that OCFS2 can pass in its references
  quota: unify quota init condition in setattr
  quota: remove sb_has_quota_active in get/set_info
  quota: unify ->set_dqblk
  quota: unify ->get_dqblk
  ext3: make barrier options consistent with ext4
  quota: Make quota stat accounting lockless.
  suppress warning: "quotatypes" defined but not used
  ext3: Fix waiting on transaction during fsync
  jbd: Provide function to check whether transaction will issue data barrier
  ufs: add ufs speciffic ->setattr call
  BKL: Remove BKL from ext2 filesystem
  ...
parents a6f03986 1907131b
@@ -59,8 +59,19 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.
barrier=1 This enables/disables barriers. barrier=0 disables
it, barrier=1 enables it.
barrier=<0(*)|1> This enables/disables the use of write barriers in
barrier the jbd code. barrier=0 disables, barrier=1 enables.
nobarrier (*) This also requires an IO stack which can support
barriers, and if jbd gets an error on a barrier
write, it will disable again with a warning.
Write barriers enforce proper on-disk ordering
of journal commits, making volatile disk write caches
safe to use, at some performance penalty. If
your disks are battery-backed in one way or another,
disabling barriers may safely improve performance.
The mount options "barrier" and "nobarrier" can
also be used to enable or disable barriers, for
consistency with other ext3 mount options.
orlov (*) This enables the new Orlov block allocator. It is
enabled by default.
......
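The new "nobarrier" option documented above can be exercised from userspace with a plain mount(2) call. A minimal sketch, assuming a hypothetical ext3 volume on /dev/sdb1 and a mount point /mnt (both placeholders):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* "nobarrier" is the new spelling added by this series;
		 * it is equivalent to the existing barrier=0. */
		if (mount("/dev/sdb1", "/mnt", "ext3", 0, "nobarrier") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}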
@@ -1331,6 +1331,12 @@ retry_alloc:
goto io_error;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
* skip this group (and avoid loading bitmap) if there
* are no free blocks
*/
if (!free_blocks)
continue;
/*
* skip this group if the number of
* free blocks is less than half of the reservation
......
@@ -106,7 +106,7 @@ void ext2_free_inode (struct inode * inode)
struct super_block * sb = inode->i_sb;
int is_directory;
unsigned long ino;
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *bitmap_bh;
unsigned long block_group;
unsigned long bit;
struct ext2_super_block * es;
@@ -135,14 +135,13 @@ void ext2_free_inode (struct inode * inode)
ino > le32_to_cpu(es->s_inodes_count)) {
ext2_error (sb, "ext2_free_inode",
"reserved or nonexistent inode %lu", ino);
goto error_return;
return;
}
block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
brelse(bitmap_bh);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh)
goto error_return;
return;
/* Ok, now we can actually update the inode bitmaps.. */
if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
@@ -154,7 +153,7 @@ void ext2_free_inode (struct inode * inode)
mark_buffer_dirty(bitmap_bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
error_return:
brelse(bitmap_bh);
}
......
@@ -22,7 +22,6 @@
* Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
*/
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
@@ -1406,11 +1405,11 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
/* If this is the first large file
* created, add a flag to the superblock.
*/
lock_kernel();
spin_lock(&EXT2_SB(sb)->s_lock);
ext2_update_dynamic_rev(sb);
EXT2_SET_RO_COMPAT_FEATURE(sb,
EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
unlock_kernel();
spin_unlock(&EXT2_SB(sb)->s_lock);
ext2_write_super(sb);
}
}
@@ -1467,7 +1466,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
if (is_quota_modification(inode, iattr))
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
......
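The is_quota_modification() test used above is introduced elsewhere in this series ("quota: unify quota init condition in setattr"): dquot_initialize() now runs for any size, uid or gid change rather than for ATTR_SIZE alone. A sketch of the helper's logic, assuming the 2.6.35-era iattr/inode fields (the real body lives in include/linux/quotaops.h):

	static inline int is_quota_modification(struct inode *inode, struct iattr *ia)
	{
		/* Quota must be initialized before a truncate changes usage
		 * or a chown()-style id change transfers it. */
		return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
			(ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
			(ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
	}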
@@ -26,7 +26,6 @@
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
@@ -39,7 +38,7 @@
#include "xip.h"
static void ext2_sync_super(struct super_block *sb,
struct ext2_super_block *es);
struct ext2_super_block *es, int wait);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
@@ -52,9 +51,11 @@ void ext2_error (struct super_block * sb, const char * function,
struct ext2_super_block *es = sbi->s_es;
if (!(sb->s_flags & MS_RDONLY)) {
spin_lock(&sbi->s_lock);
sbi->s_mount_state |= EXT2_ERROR_FS;
es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
ext2_sync_super(sb, es);
spin_unlock(&sbi->s_lock);
ext2_sync_super(sb, es, 1);
}
va_start(args, fmt);
@@ -84,6 +85,9 @@ void ext2_msg(struct super_block *sb, const char *prefix,
va_end(args);
}
/*
* This must be called with sbi->s_lock held.
*/
void ext2_update_dynamic_rev(struct super_block *sb)
{
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
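Since the locking rule is now documented, the contract could also be made self-checking; a hedged one-line addition (not part of this patch) at the top of ext2_update_dynamic_rev() would be:

	/* Hypothetical: verify the documented caller contract. */
	assert_spin_locked(&EXT2_SB(sb)->s_lock);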
@@ -115,8 +119,6 @@ static void ext2_put_super (struct super_block * sb)
int i;
struct ext2_sb_info *sbi = EXT2_SB(sb);
lock_kernel();
if (sb->s_dirt)
ext2_write_super(sb);
@@ -124,8 +126,10 @@ static void ext2_put_super (struct super_block * sb)
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
spin_lock(&sbi->s_lock);
es->s_state = cpu_to_le16(sbi->s_mount_state);
ext2_sync_super(sb, es);
spin_unlock(&sbi->s_lock);
ext2_sync_super(sb, es, 1);
}
db_count = sbi->s_gdb_count;
for (i = 0; i < db_count; i++)
@@ -140,8 +144,6 @@ static void ext2_put_super (struct super_block * sb)
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
unlock_kernel();
}
static struct kmem_cache * ext2_inode_cachep;
@@ -209,6 +211,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
struct ext2_super_block *es = sbi->s_es;
unsigned long def_mount_opts;
spin_lock(&sbi->s_lock);
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (sbi->s_sb_block != 1)
@@ -281,6 +284,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (!test_opt(sb, RESERVATION))
seq_puts(seq, ",noreservation");
spin_unlock(&sbi->s_lock);
return 0;
}
@@ -606,7 +610,6 @@ static int ext2_setup_super (struct super_block * sb,
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
ext2_write_super(sb);
if (test_opt (sb, DEBUG))
ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
@@ -767,6 +770,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
spin_lock_init(&sbi->s_lock);
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
@@ -1079,7 +1084,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
ext2_write_super(sb);
return 0;
cantfind_ext2:
@@ -1120,30 +1127,26 @@ static void ext2_clear_super_error(struct super_block *sb)
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
"superblock detected", sb->s_id);
ext2_msg(sb, KERN_ERR,
"previous I/O error to superblock detected\n");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
static void ext2_commit_super (struct super_block * sb,
struct ext2_super_block * es)
{
ext2_clear_super_error(sb);
es->s_wtime = cpu_to_le32(get_seconds());
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
int wait)
{
ext2_clear_super_error(sb);
spin_lock(&EXT2_SB(sb)->s_lock);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_wtime = cpu_to_le32(get_seconds());
/* unlock before we do IO */
spin_unlock(&EXT2_SB(sb)->s_lock);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
if (wait)
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
@@ -1157,43 +1160,18 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
* may have been checked while mounted and e2fsck may have
* set s_state to EXT2_VALID_FS after some corrections.
*/
static int ext2_sync_fs(struct super_block *sb, int wait)
{
struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
lock_kernel();
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
* superblock failed. This could happen because the
* USB device was yanked out. Or it could happen to
* be a transient write error and maybe the block will
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
ext2_msg(sb, KERN_ERR,
"previous I/O error to superblock detected\n");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
spin_lock(&sbi->s_lock);
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug("setting valid to 0\n");
es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
es->s_free_blocks_count =
cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count =
cpu_to_le32(ext2_count_free_inodes(sb));
es->s_mtime = cpu_to_le32(get_seconds());
ext2_sync_super(sb, es);
} else {
ext2_commit_super(sb, es);
}
sb->s_dirt = 0;
unlock_kernel();
spin_unlock(&sbi->s_lock);
ext2_sync_super(sb, es, wait);
return 0;
}
@@ -1215,7 +1193,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
unsigned long old_sb_flags;
int err;
lock_kernel();
spin_lock(&sbi->s_lock);
/* Store the old options */
old_sb_flags = sb->s_flags;
@@ -1254,13 +1232,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
}
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
unlock_kernel();
spin_unlock(&sbi->s_lock);
return 0;
}
if (*flags & MS_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS)) {
unlock_kernel();
spin_unlock(&sbi->s_lock);
return 0;
}
/*
@@ -1269,6 +1247,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
*/
es->s_state = cpu_to_le16(sbi->s_mount_state);
es->s_mtime = cpu_to_le32(get_seconds());
spin_unlock(&sbi->s_lock);
ext2_sync_super(sb, es, 1);
} else {
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
@@ -1288,16 +1268,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
spin_unlock(&sbi->s_lock);
ext2_write_super(sb);
}
ext2_sync_super(sb, es);
unlock_kernel();
return 0;
restore_opts:
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sb->s_flags = old_sb_flags;
unlock_kernel();
spin_unlock(&sbi->s_lock);
return err;
}
@@ -1308,6 +1288,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
struct ext2_super_block *es = sbi->s_es;
u64 fsid;
spin_lock(&sbi->s_lock);
if (test_opt (sb, MINIX_DF))
sbi->s_overhead_last = 0;
else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
@@ -1362,6 +1344,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
spin_unlock(&sbi->s_lock);
return 0;
}
......
@@ -345,7 +345,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
return;
spin_lock(&EXT2_SB(sb)->s_lock);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
spin_unlock(&EXT2_SB(sb)->s_lock);
sb->s_dirt = 1;
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
......
@@ -1583,6 +1583,12 @@ retry_alloc:
if (!gdp)
goto io_error;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
* skip this group (and avoid loading bitmap) if there
* are no free blocks
*/
if (!free_blocks)
continue;
/*
* skip this group if the number of
* free blocks is less than half of the reservation
......
@@ -48,7 +48,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct inode *inode = dentry->d_inode;
struct ext3_inode_info *ei = EXT3_I(inode);
journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
int ret = 0;
int ret, needs_barrier = 0;
tid_t commit_tid;
if (inode->i_sb->s_flags & MS_RDONLY)
@@ -70,28 +70,26 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
if (ext3_should_journal_data(inode)) {
ret = ext3_force_commit(inode->i_sb);
goto out;
}
if (ext3_should_journal_data(inode))
return ext3_force_commit(inode->i_sb);
if (datasync)
commit_tid = atomic_read(&ei->i_datasync_tid);
else
commit_tid = atomic_read(&ei->i_sync_tid);
if (log_start_commit(journal, commit_tid)) {
log_wait_commit(journal, commit_tid);
goto out;
}
if (test_opt(inode->i_sb, BARRIER) &&
!journal_trans_will_send_data_barrier(journal, commit_tid))
needs_barrier = 1;
log_start_commit(journal, commit_tid);
ret = log_wait_commit(journal, commit_tid);
/*
* In case we didn't commit a transaction, we have to flush
* disk caches manually so that data really is on persistent
* storage
*/
if (test_opt(inode->i_sb, BARRIER))
if (needs_barrier)
blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
out:
return ret;
}
@@ -3151,7 +3151,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
if (ia_valid & ATTR_SIZE)
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
......
@@ -653,8 +653,12 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
if (test_opt(sb, BARRIER))
seq_puts(seq, ",barrier=1");
/*
* Always display barrier state so it's clear what the status is.
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
@@ -810,8 +814,8 @@ enum {
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
Opt_usrquota, Opt_grpquota
Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_resize, Opt_usrquota, Opt_grpquota
};
static const match_table_t tokens = {
@@ -865,6 +869,8 @@ static const match_table_t tokens = {
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_resize, "resize"},
{Opt_err, NULL},
};
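The bare "barrier"/"nobarrier" patterns above carry no %u conversion, so match_token() never writes into args[] for them; that is why parse_options() below starts zeroing args[0] before matching. A condensed sketch of the resulting optional-argument idiom (names taken from the hunks that follow; fragment, not a full function):

	substring_t args[MAX_OPT_ARGS];
	int option, token;

	args[0].to = args[0].from = NULL;	/* so we can tell if %u matched */
	token = match_token(p, tokens, args);
	if (token == Opt_barrier) {
		if (args[0].from) {		/* "barrier=%u" form */
			if (match_int(&args[0], &option))
				return 0;	/* malformed number */
		} else
			option = 1;		/* bare "barrier" means enable */
	}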
@@ -967,7 +973,11 @@ static int parse_options (char *options, struct super_block *sb,
int token;
if (!*p)
continue;
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1215,9 +1225,15 @@ set_qf_format:
case Opt_abort:
set_opt(sbi->s_mount_opt, ABORT);
break;
case Opt_nobarrier:
clear_opt(sbi->s_mount_opt, BARRIER);
break;
case Opt_barrier:
if (match_int(&args[0], &option))
return 0;
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@@ -1890,21 +1906,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext3_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext3_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext3_count_dirs(sb));
}
if (err) {
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
/* per fileystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
@@ -1945,15 +1946,29 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext3_load_journal(sb, es, journal_devnum))
goto failed_mount3;
goto failed_mount2;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
goto failed_mount3;
goto failed_mount2;
} else {
if (!silent)
ext3_msg(sb, KERN_ERR,
"error: no journal found. "
"mounting ext3 over ext2?");
goto failed_mount2;
}
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext3_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext3_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext3_count_dirs(sb));
}
if (err) {
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
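The counter setup moves below journal initialization so a journal failure can unwind through failed_mount2 without touching counters, letting counter teardown and journal_destroy() collapse into the single failed_mount3 label (see the relabeled gotos below). For reference, a minimal sketch of the percpu counter lifecycle involved, assuming the 2.6.34-era API (percpu_counter_init() took no gfp_t argument yet):

	#include <linux/percpu_counter.h>

	static struct percpu_counter free_blocks;

	static int counter_example(s64 initial)
	{
		int err = percpu_counter_init(&free_blocks, initial);
		if (err)
			return err;			/* per-CPU allocation failed */
		percpu_counter_add(&free_blocks, -1);	/* e.g. one block allocated */
		percpu_counter_destroy(&free_blocks);
		return 0;
	}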
@@ -1978,7 +1993,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_msg(sb, KERN_ERR,
"error: journal does not support "
"requested data journaling mode");
goto failed_mount4;
goto failed_mount3;
}
default:
break;
@@ -2001,19 +2016,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (IS_ERR(root)) {
ext3_msg(sb, KERN_ERR, "error: get root inode failed");
ret = PTR_ERR(root);
goto failed_mount4;
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount4;
goto failed_mount3;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
iput(root);
ret = -ENOMEM;
goto failed_mount4;
goto failed_mount3;
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2039,12 +2054,11 @@ cantfind_ext3:
sb->s_id);
goto failed_mount;
failed_mount4:
journal_destroy(sbi->s_journal);
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
journal_destroy(sbi->s_journal);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -2278,6 +2292,9 @@ static int ext3_load_journal(struct super_block *sb,
return -EINVAL;
}
if (!(journal->j_flags & JFS_BARRIER))
printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
err = journal_update_format(journal);
if (err) {
......
@@ -5425,7 +5425,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
if (ia_valid & ATTR_SIZE)
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
......
@@ -1476,8 +1476,8 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
return 0;
}
static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1521,8 +1521,8 @@ out:
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1629,7 +1629,7 @@ out_put:
const struct quotactl_ops gfs2_quotactl_ops = {
.quota_sync = gfs2_quota_sync,
.get_xstate = gfs2_quota_get_xstate,
.get_xquota = gfs2_xquota_get,
.set_xquota = gfs2_xquota_set,
.get_dqblk = gfs2_get_dqblk,
.set_dqblk = gfs2_set_dqblk,
};
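This is the GFS2 side of the ->get_dqblk/->set_dqblk unification named in the merge log: the XFS-style get_xquota/set_xquota callbacks become the generic per-id entry points, with struct fs_disk_quota as the common currency. A self-contained sketch of the unified callback shape (other quotactl_ops members omitted; the qid_t typedef is an assumption matching the kernel's):

	struct super_block;
	struct fs_disk_quota;
	typedef unsigned int qid_t;	/* assumption: mirrors the kernel typedef */

	/* Unified per-id quota get/set callbacks; VFS-quota and XFS-style
	 * filesystems now exchange the same structure. */
	struct quotactl_ops_sketch {
		int (*get_dqblk)(struct super_block *sb, int type, qid_t id,
				 struct fs_disk_quota *fdq);
		int (*set_dqblk)(struct super_block *sb, int type, qid_t id,
				 struct fs_disk_quota *fdq);
	};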
@@ -786,6 +786,12 @@ wait_for_iobuf:
jbd_debug(3, "JBD: commit phase 6\n");
/* All metadata is written, now write commit record and do cleanup */
spin_lock(&journal->j_state_lock);
J_ASSERT(commit_transaction->t_state == T_COMMIT);
commit_transaction->t_state = T_COMMIT_RECORD;
spin_unlock(&journal->j_state_lock);
if (journal_write_commit_record(journal, commit_transaction))
err = -EIO;
@@ -923,7 +929,7 @@ restart_loop:
jbd_debug(3, "JBD: commit phase 8\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT);
J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
commit_transaction->t_state = T_FINISHED;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
......
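The new T_COMMIT_RECORD state slots between T_COMMIT and T_FINISHED so that journal_trans_will_send_data_barrier() (next hunk) can tell whether the commit record, and with it the barrier, is still to be issued. A sketch of the transaction state progression; the member set is assumed from the 2.6.34-era include/linux/jbd.h:

	enum jbd_transaction_state {
		T_RUNNING,		/* still accepting new handles */
		T_LOCKED,		/* no new handles; waiting for updates */
		T_RUNDOWN,
		T_FLUSH,		/* ordered-mode data being flushed */
		T_COMMIT,		/* metadata being written to the journal */
		T_COMMIT_RECORD,	/* new: metadata done, commit record pending */
		T_FINISHED,		/* commit record on disk */
	};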
@@ -564,6 +564,38 @@ int log_wait_commit(journal_t *journal, tid_t tid)
return err;
}
/*
* Return 1 if a given transaction has not yet sent barrier request
* connected with a transaction commit. If 0 is returned, transaction
* may or may not have sent the barrier. Used to avoid sending barrier
* twice in common cases.
*/
int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
int ret = 0;
transaction_t *commit_trans;
if (!(journal->j_flags & JFS_BARRIER))
return 0;
spin_lock(&journal->j_state_lock);
/* Transaction already committed? */
if (tid_geq(journal->j_commit_sequence, tid))
goto out;
/*
* Transaction is being committed and we already proceeded to
* writing commit record?
*/
commit_trans = journal->j_committing_transaction;
if (commit_trans && commit_trans->t_tid == tid &&
commit_trans->t_state >= T_COMMIT_RECORD)
goto out;
ret = 1;
out:
spin_unlock(&journal->j_state_lock);
return ret;
}
EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
/*
* Log buffer allocation routines:
*/
@@ -1157,6 +1189,7 @@ int journal_destroy(journal_t *journal)
{
int err = 0;
/* Wait for the commit thread to wake up and die. */
journal_kill_thread(journal);
......
@@ -98,7 +98,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (rc)
return rc;