Diffstat (limited to 'fs')
-rw-r--r--  fs/binfmt_elf.c               |  14
-rw-r--r--  fs/bio.c                      |   5
-rw-r--r--  fs/btrfs/Kconfig              |  13
-rw-r--r--  fs/btrfs/async-thread.c       |  61
-rw-r--r--  fs/btrfs/compression.c        |   1
-rw-r--r--  fs/btrfs/ctree.c              | 315
-rw-r--r--  fs/btrfs/ctree.h              |  37
-rw-r--r--  fs/btrfs/disk-io.c            | 166
-rw-r--r--  fs/btrfs/disk-io.h            |  12
-rw-r--r--  fs/btrfs/extent-tree.c        | 519
-rw-r--r--  fs/btrfs/extent_io.c          | 134
-rw-r--r--  fs/btrfs/extent_io.h          |  18
-rw-r--r--  fs/btrfs/extent_map.c         |   1
-rw-r--r--  fs/btrfs/file.c               |  13
-rw-r--r--  fs/btrfs/inode-map.c          |   1
-rw-r--r--  fs/btrfs/inode.c              |  88
-rw-r--r--  fs/btrfs/ioctl.c              |   1
-rw-r--r--  fs/btrfs/locking.c            | 207
-rw-r--r--  fs/btrfs/locking.h            |   6
-rw-r--r--  fs/btrfs/ordered-data.c       |   4
-rw-r--r--  fs/btrfs/ref-cache.c          |   1
-rw-r--r--  fs/btrfs/ref-cache.h          |   1
-rw-r--r--  fs/btrfs/super.c              |  11
-rw-r--r--  fs/btrfs/transaction.c        |   6
-rw-r--r--  fs/btrfs/tree-defrag.c        |   1
-rw-r--r--  fs/btrfs/tree-log.c           | 356
-rw-r--r--  fs/btrfs/volumes.c            |  55
-rw-r--r--  fs/btrfs/xattr.c              |  48
-rw-r--r--  fs/btrfs/xattr.h              |   2
-rw-r--r--  fs/buffer.c                   |   5
-rw-r--r--  fs/compat.c                   |   2
-rw-r--r--  fs/compat_ioctl.c             |   4
-rw-r--r--  fs/ecryptfs/crypto.c          |   4
-rw-r--r--  fs/exec.c                     |  28
-rw-r--r--  fs/ext2/super.c               |   9
-rw-r--r--  fs/ext3/super.c               |  11
-rw-r--r--  fs/ext4/ext4.h                |   2
-rw-r--r--  fs/ext4/inode.c               |  27
-rw-r--r--  fs/ext4/mballoc.c             |  32
-rw-r--r--  fs/ext4/migrate.c             |   8
-rw-r--r--  fs/ext4/super.c               |  11
-rw-r--r--  fs/hugetlbfs/inode.c          |   8
-rw-r--r--  fs/internal.h                 |   2
-rw-r--r--  fs/jbd/journal.c              |  17
-rw-r--r--  fs/jbd2/journal.c             |  17
-rw-r--r--  fs/jbd2/transaction.c         |  42
-rw-r--r--  fs/lockd/svclock.c            |   6
-rw-r--r--  fs/namespace.c                |   6
-rw-r--r--  fs/notify/inotify/inotify.c   |   2
-rw-r--r--  fs/ocfs2/alloc.c              |   3
-rw-r--r--  fs/ocfs2/dcache.c             |  42
-rw-r--r--  fs/ocfs2/dcache.h             |   9
-rw-r--r--  fs/ocfs2/dlmglue.c            |   4
-rw-r--r--  fs/ocfs2/journal.h            |   6
-rw-r--r--  fs/ocfs2/ocfs2.h              |   6
-rw-r--r--  fs/ocfs2/quota_global.c       |   4
-rw-r--r--  fs/ocfs2/super.c              |   3
-rw-r--r--  fs/ocfs2/xattr.c              |  17
-rw-r--r--  fs/seq_file.c                 | 151
-rw-r--r--  fs/super.c                    |  21
-rw-r--r--  fs/timerfd.c                  |  12
-rw-r--r--  fs/ubifs/budget.c             |  35
-rw-r--r--  fs/ubifs/debug.c              | 122
-rw-r--r--  fs/ubifs/debug.h              |  36
-rw-r--r--  fs/ubifs/dir.c                |  96
-rw-r--r--  fs/ubifs/file.c               |   9
-rw-r--r--  fs/ubifs/gc.c                 |  28
-rw-r--r--  fs/ubifs/io.c                 |  22
-rw-r--r--  fs/ubifs/journal.c            |   2
-rw-r--r--  fs/ubifs/lprops.c             |  12
-rw-r--r--  fs/ubifs/lpt_commit.c         |  44
-rw-r--r--  fs/ubifs/master.c             |   2
-rw-r--r--  fs/ubifs/orphan.c             |  38
-rw-r--r--  fs/ubifs/super.c              | 195
-rw-r--r--  fs/ubifs/tnc.c                |  12
-rw-r--r--  fs/ubifs/ubifs.h              |  26
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c    |  79
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c   |   6
-rw-r--r--  fs/xfs/xfs_dfrag.c            |  10
-rw-r--r--  fs/xfs/xfs_log_recover.c      |  31
80 files changed, 2416 insertions, 1007 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e3ff2b9e602..33b7235f853 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1208,9 +1208,11 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
* check for an ELF header. If we find one, dump the first page to
* aid in determining what was mapped here.
*/
- if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
+ if (FILTER(ELF_HEADERS) &&
+ vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
u32 __user *header = (u32 __user *) vma->vm_start;
u32 word;
+ mm_segment_t fs = get_fs();
/*
* Doing it this way gets the constant folded by GCC.
*/
@@ -1223,7 +1225,15 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
magic.elfmag[EI_MAG1] = ELFMAG1;
magic.elfmag[EI_MAG2] = ELFMAG2;
magic.elfmag[EI_MAG3] = ELFMAG3;
- if (get_user(word, header) == 0 && word == magic.cmp)
+ /*
+ * Switch to the user "segment" for get_user(),
+ * then put back what elf_core_dump() had in place.
+ */
+ set_fs(USER_DS);
+ if (unlikely(get_user(word, header)))
+ word = 0;
+ set_fs(fs);
+ if (word == magic.cmp)
return PAGE_SIZE;
}
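
The set_fs() dance added above is needed because elf_core_dump() runs with the address limit set to KERNEL_DS, which would let get_user() accept kernel addresses during the dump. A minimal sketch of the save/switch/restore idiom, using the get_fs()/set_fs() API this tree provides; peek_user_word() is a hypothetical helper, not part of the patch:

    #include <linux/uaccess.h>

    /* read one word from user space even if the caller is running
     * with KERNEL_DS; returns 0 if the address is not readable */
    static u32 peek_user_word(u32 __user *addr)
    {
        mm_segment_t old_fs = get_fs();   /* save the caller's limit */
        u32 word = 0;

        set_fs(USER_DS);                  /* enforce user-space checks */
        if (get_user(word, addr))         /* nonzero means -EFAULT */
            word = 0;
        set_fs(old_fs);                   /* restore the saved limit */
        return word;
    }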
diff --git a/fs/bio.c b/fs/bio.c
index 062299acbcc..72ab251cdb9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -302,9 +302,10 @@ void bio_init(struct bio *bio)
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
struct bio *bio = NULL;
+ void *p;
if (bs) {
- void *p = mempool_alloc(bs->bio_pool, gfp_mask);
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
if (p)
bio = p + bs->front_pad;
@@ -329,7 +330,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
}
if (unlikely(!bvl)) {
if (bs)
- mempool_free(bio, bs->bio_pool);
+ mempool_free(p, bs->bio_pool);
else
kfree(bio);
bio = NULL;
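
The mempool_free() change above fixes a pointer-offset bug: with front_pad, the bio handed to callers points front_pad bytes into the mempool object, so freeing bio instead of the base pointer p corrupts the pool. A sketch of the invariant (front_pad stands in for bs->front_pad):

    void *p = mempool_alloc(pool, gfp_mask);      /* base allocation */
    struct bio *bio = p ? p + front_pad : NULL;   /* offset view for callers */

    /* on any error path: */
    mempool_free(p, pool);    /* always free the base pointer, never bio */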
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index f8fcf999ea1..7bb3c020e57 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -16,3 +16,16 @@ config BTRFS_FS
module will be called btrfs.
If unsure, say N.
+
+config BTRFS_FS_POSIX_ACL
+ bool "Btrfs POSIX Access Control Lists"
+ depends on BTRFS_FS
+ select FS_POSIX_ACL
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe..c84ca1f5259 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,11 +16,11 @@
* Boston, MA 021110-1307, USA.
*/
-#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
@@ -143,6 +143,7 @@ static int worker_loop(void *arg)
struct btrfs_work *work;
do {
spin_lock_irq(&worker->lock);
+again_locked:
while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
work = list_entry(cur, struct btrfs_work, list);
@@ -165,14 +166,50 @@ static int worker_loop(void *arg)
check_idle_worker(worker);
}
- worker->working = 0;
if (freezing(current)) {
+ worker->working = 0;
+ spin_unlock_irq(&worker->lock);
refrigerator();
} else {
- set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&worker->lock);
- if (!kthread_should_stop())
+ if (!kthread_should_stop()) {
+ cpu_relax();
+ /*
+ * we've dropped the lock, did someone else
+ * jump_in?
+ */
+ smp_mb();
+ if (!list_empty(&worker->pending))
+ continue;
+
+ /*
+ * this short schedule allows more work to
+ * come in without the queue functions
+ * needing to go through wake_up_process()
+ *
+ * worker->working is still 1, so nobody
+ * is going to try and wake us up
+ */
+ schedule_timeout(1);
+ smp_mb();
+ if (!list_empty(&worker->pending))
+ continue;
+
+ /* still no more work? sleep for real */
+ spin_lock_irq(&worker->lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!list_empty(&worker->pending))
+ goto again_locked;
+
+ /*
+ * this makes sure we get a wakeup when someone
+ * adds something new to the queue
+ */
+ worker->working = 0;
+ spin_unlock_irq(&worker->lock);
+
schedule();
+ }
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
@@ -350,13 +387,14 @@ int btrfs_requeue_work(struct btrfs_work *work)
{
struct btrfs_worker_thread *worker = work->worker;
unsigned long flags;
+ int wake = 0;
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
goto out;
spin_lock_irqsave(&worker->lock, flags);
- atomic_inc(&worker->num_pending);
list_add_tail(&work->list, &worker->pending);
+ atomic_inc(&worker->num_pending);
/* by definition we're busy, take ourselves off the idle
* list
@@ -368,10 +406,16 @@ int btrfs_requeue_work(struct btrfs_work *work)
&worker->workers->worker_list);
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
+ if (!worker->working) {
+ wake = 1;
+ worker->working = 1;
+ }
spin_unlock_irqrestore(&worker->lock, flags);
-
+ if (wake)
+ wake_up_process(worker->task);
out:
+
return 0;
}
@@ -398,9 +442,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
}
spin_lock_irqsave(&worker->lock, flags);
+
+ list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);
check_busy_worker(worker);
- list_add_tail(&work->list, &worker->pending);
/*
* avoid calling into wake_up_process if this thread has already
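
The reordering above (list_add_tail() before the worker->working test, all under worker->lock) pairs with the new sleep path in worker_loop() to close a lost-wakeup race. A condensed sketch of the two sides as this patch arranges them:

    /* queueing side: publish the work, then decide whether to wake */
    spin_lock_irqsave(&worker->lock, flags);
    list_add_tail(&work->list, &worker->pending);
    if (!worker->working) {        /* worker is idle or about to sleep */
        worker->working = 1;
        wake = 1;
    }
    spin_unlock_irqrestore(&worker->lock, flags);
    if (wake)
        wake_up_process(worker->task);

    /* sleeping side: clear 'working' only after a final check */
    spin_lock_irq(&worker->lock);
    set_current_state(TASK_INTERRUPTIBLE);
    if (!list_empty(&worker->pending))
        goto again_locked;         /* work slipped in, don't sleep */
    worker->working = 0;           /* from here on, queuers wake us */
    spin_unlock_irq(&worker->lock);
    schedule();

Because both sides hold worker->lock, a queuer either observes working == 0 and issues the wakeup, or the worker observes the non-empty list before committing to sleep.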
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ee848d8585d..ab07627084f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -32,7 +32,6 @@
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
-#include <linux/version.h>
#include <linux/pagevec.h>
#include "compat.h"
#include "ctree.h"
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9e46c077681..42491d728e9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -38,22 +38,64 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
-inline void btrfs_init_path(struct btrfs_path *p)
-{
- memset(p, 0, sizeof(*p));
-}
-
struct btrfs_path *btrfs_alloc_path(void)
{
struct btrfs_path *path;
- path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
- if (path) {
- btrfs_init_path(path);
+ path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+ if (path)
path->reada = 1;
- }
return path;
}
+/*
+ * set all locked nodes in the path to blocking locks. This should
+ * be done before scheduling
+ */
+noinline void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+ int i;
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ if (p->nodes[i] && p->locks[i])
+ btrfs_set_lock_blocking(p->nodes[i]);
+ }
+}
+
+/*
+ * reset all the locked nodes in the path to spinning locks.
+ *
+ * held is used to keep lockdep happy, when lockdep is enabled
+ * we set held to a blocking lock before we go around and
+ * retake all the spinlocks in the path. You can safely use NULL
+ * for held
+ */
+noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
+ struct extent_buffer *held)
+{
+ int i;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* lockdep really cares that we take all of these spinlocks
+ * in the right order. If any of the locks in the path are not
+ * currently blocking, it is going to complain. So, make really
+ * really sure by forcing the path to blocking before we clear
+ * the path blocking.
+ */
+ if (held)
+ btrfs_set_lock_blocking(held);
+ btrfs_set_path_blocking(p);
+#endif
+
+ for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
+ if (p->nodes[i] && p->locks[i])
+ btrfs_clear_lock_blocking(p->nodes[i]);
+ }
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (held)
+ btrfs_clear_lock_blocking(held);
+#endif
+}
+
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
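
btrfs_set_path_blocking() and btrfs_clear_path_blocking() bracket the new two-mode tree locks introduced in this series: locks are taken in cheap spinning mode, and any code that might schedule (COW, disk reads, allocation) must flip the held path to blocking first. A sketch of the intended usage; do_something_that_sleeps() is a placeholder:

    btrfs_set_path_blocking(path);            /* now safe to schedule */
    ret = do_something_that_sleeps(trans, root, path);
    btrfs_clear_path_blocking(path, NULL);    /* back to spinning mode */

The second argument is the lockdep-only 'held' buffer described in the comment above; pass NULL unless a blocking lock is already held outside the path.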
@@ -261,7 +303,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
trans->transid, level, &ins);
BUG_ON(ret);
cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
- buf->len);
+ buf->len, level);
} else {
cow = btrfs_alloc_free_block(trans, root, buf->len,
parent_start,
@@ -272,6 +314,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (IS_ERR(cow))
return PTR_ERR(cow);
+ /* cow is set to blocking by btrfs_init_new_buffer */
+
copy_extent_buffer(cow, buf, 0, 0, cow->len);
btrfs_set_header_bytenr(cow, cow->start);
btrfs_set_header_generation(cow, trans->transid);
@@ -388,17 +432,20 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(1);
}
- spin_lock(&root->fs_info->hash_lock);
if (btrfs_header_generation(buf) == trans->transid &&
btrfs_header_owner(buf) == root->root_key.objectid &&
!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
*cow_ret = buf;
- spin_unlock(&root->fs_info->hash_lock);
WARN_ON(prealloc_dest);
return 0;
}
- spin_unlock(&root->fs_info->hash_lock);
+
search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
+
+ if (parent)
+ btrfs_set_lock_blocking(parent);
+ btrfs_set_lock_blocking(buf);
+
ret = __btrfs_cow_block(trans, root, buf, parent,
parent_slot, cow_ret, search_start, 0,
prealloc_dest);
@@ -504,6 +551,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (parent_nritems == 1)
return 0;
+ btrfs_set_lock_blocking(parent);
+
for (i = start_slot; i < end_slot; i++) {
int close = 1;
@@ -564,6 +613,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
search_start = last_block;
btrfs_tree_lock(cur);
+ btrfs_set_lock_blocking(cur);
err = __btrfs_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
@@ -862,6 +912,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
mid = path->nodes[level];
+
WARN_ON(!path->locks[level]);
WARN_ON(btrfs_header_generation(mid) != trans->transid);
@@ -883,8 +934,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
- btrfs_tree_lock(child);
BUG_ON(!child);
+ btrfs_tree_lock(child);
+ btrfs_set_lock_blocking(child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
BUG_ON(ret);
@@ -900,6 +952,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
add_root_to_dirty_list(root);
btrfs_tree_unlock(child);
+
path->locks[level] = 0;
path->nodes[level] = NULL;
clean_tree_block(trans, root, mid);
@@ -924,6 +977,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
left = read_node_slot(root, parent, pslot - 1);
if (left) {
btrfs_tree_lock(left);
+ btrfs_set_lock_blocking(left);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left, 0);
if (wret) {
@@ -934,6 +988,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
right = read_node_slot(root, parent, pslot + 1);
if (right) {
btrfs_tree_lock(right);
+ btrfs_set_lock_blocking(right);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right, 0);
if (wret) {
@@ -1109,6 +1164,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
u32 left_nr;
btrfs_tree_lock(left);
+ btrfs_set_lock_blocking(left);
+
left_nr = btrfs_header_nritems(left);
if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
wret = 1;
@@ -1155,7 +1212,10 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
*/
if (right) {
u32 right_nr;
+
btrfs_tree_lock(right);
+ btrfs_set_lock_blocking(right);
+
right_nr = btrfs_header_nritems(right);
if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
wret = 1;
@@ -1210,8 +1270,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
struct btrfs_disk_key disk_key;
u32 nritems;
u64 search;
- u64 lowest_read;
- u64 highest_read;
+ u64 target;
u64 nread = 0;
int direction = path->reada;
struct extent_buffer *eb;
@@ -1235,8 +1294,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
return;
}
- highest_read = search;
- lowest_read = search;
+ target = search;
nritems = btrfs_header_nritems(node);
nr = slot;
@@ -1256,27 +1314,80 @@ static noinline void reada_for_search(struct btrfs_root *root,
break;
}
search = btrfs_node_blockptr(node, nr);
- if ((search >= lowest_read && search <= highest_read) ||
- (search < lowest_read && lowest_read - search <= 16384) ||
- (search > highest_read && search - highest_read <= 16384)) {
+ if ((search <= target && target - search <= 65536) ||
+ (search > target && search - target <= 65536)) {
readahead_tree_block(root, search, blocksize,
btrfs_node_ptr_generation(node, nr));
nread += blocksize;
}
nscan++;
- if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
+ if (nread > 65536 || nscan > 32)
break;
+ }
+}
- if (nread > (256 * 1024) || nscan > 128)
- break;
+/*
+ * returns -EAGAIN if it had to drop the path, or zero if everything was in
+ * cache
+ */
+static noinline int reada_for_balance(struct btrfs_root *root,
+ struct btrfs_path *path, int level)
+{
+ int slot;
+ int nritems;
+ struct extent_buffer *parent;
+ struct extent_buffer *eb;
+ u64 gen;
+ u64 block1 = 0;
+ u64 block2 = 0;
+ int ret = 0;
+ int blocksize;
+
+ parent = path->nodes[level - 1];
+ if (!parent)
+ return 0;
- if (search < lowest_read)
- lowest_read = search;
- if (search > highest_read)
- highest_read = search;
+ nritems = btrfs_header_nritems(parent);
+ slot = path->slots[level];
+ blocksize = btrfs_level_size(root, level);
+
+ if (slot > 0) {
+ block1 = btrfs_node_blockptr(parent, slot - 1);
+ gen = btrfs_node_ptr_generation(parent, slot - 1);
+ eb = btrfs_find_tree_block(root, block1, blocksize);
+ if (eb && btrfs_buffer_uptodate(eb, gen))
+ block1 = 0;
+ free_extent_buffer(eb);
+ }
+ if (slot < nritems) {
+ block2 = btrfs_node_blockptr(parent, slot + 1);
+ gen = btrfs_node_ptr_generation(parent, slot + 1);
+ eb = btrfs_find_tree_block(root, block2, blocksize);
+ if (eb && btrfs_buffer_uptodate(eb, gen))
+ block2 = 0;
+ free_extent_buffer(eb);
}
+ if (block1 || block2) {
+ ret = -EAGAIN;
+ btrfs_release_path(root, path);
+ if (block1)
+ readahead_tree_block(root, block1, blocksize, 0);
+ if (block2)
+ readahead_tree_block(root, block2, blocksize, 0);
+
+ if (block1) {
+ eb = read_tree_block(root, block1, blocksize, 0);
+ free_extent_buffer(eb);
+ }
+ if (block2) {
+ eb = read_tree_block(root, block2, blocksize, 0);
+ free_extent_buffer(eb);
+ }
+ }
+ return ret;
}
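
reada_for_balance() is a drop-and-retry helper: if either sibling of the block being balanced is not already cached, it releases the whole path, issues readahead for the missing blocks, and returns -EAGAIN so the caller can restart with warm caches. The call sites added to btrfs_search_slot() later in this patch all follow the same pattern:

    sret = reada_for_balance(root, p, level);
    if (sret)            /* -EAGAIN: the path was dropped, start over */
        goto again;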
+
/*
* when we walk down the tree, it is usually safe to unlock the higher layers
* in the tree. The exceptions are when our path goes through slot 0, because
@@ -1328,6 +1439,32 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
}
/*
+ * This releases any locks held in the path starting at level and
+ * going all the way up to the root.
+ *
+ * btrfs_search_slot will keep the lock held on higher nodes in a few
+ * corner cases, such as COW of the block at slot zero in the node. This
+ * ignores those rules, and it should only be called when there are no
+ * more updates to be done higher up in the tree.
+ */
+noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
+{
+ int i;
+
+ if (path->keep_locks || path->lowest_level)
+ return;
+
+ for (i = level; i < BTRFS_MAX_LEVEL; i++) {
+ if (!path->nodes[i])
+ continue;
+ if (!path->locks[i])
+ continue;
+ btrfs_tree_unlock(path->nodes[i]);
+ path->locks[i] = 0;
+ }
+}
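
btrfs_unlock_up_safe() lets expensive work run without pinning the upper levels of the tree. btrfs_del_leaf() below uses it exactly that way: it copies the parent fields it still needs, drops the locks, and only then calls btrfs_free_extent(). A condensed sketch of that shape:

    u64 parent_start = path->nodes[1]->start;    /* copy before unlocking */
    u64 parent_owner = btrfs_header_owner(path->nodes[1]);

    btrfs_unlock_up_safe(path, 0);               /* release the whole path */
    ret = btrfs_free_extent(trans, root, bytenr,
                            btrfs_level_size(root, 0),
                            parent_start, parent_owner,
                            root_gen, 0, 1);     /* slow call, no locks held */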
+
+/*
* look for key in the tree. path is filled in with nodes along the way
* if key is found, we return zero and you can find the item in the leaf
* level of the path (level 0)
@@ -1387,32 +1524,30 @@ again:
int wret;
/* is a cow on this block not required */
- spin_lock(&root->fs_info->hash_lock);
if (btrfs_header_generation(b) == trans->transid &&
btrfs_header_owner(b) == root->root_key.objectid &&
!btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
- spin_unlock(&root->fs_info->hash_lock);
goto cow_done;
}
- spin_unlock(&root->fs_info->hash_lock);
/* ok, we have to cow, is our old prealloc the right
* size?
*/
if (prealloc_block.objectid &&
prealloc_block.offset != b->len) {
+ btrfs_release_path(root, p);
btrfs_free_reserved_extent(root,
prealloc_block.objectid,
prealloc_block.offset);
prealloc_block.objectid = 0;
+ goto again;
}
/*
* for higher level blocks, try not to allocate blocks
* with the block and the parent locks held.
*/
- if (level > 1 && !prealloc_block.objectid &&
- btrfs_path_lock_waiting(p, level)) {
+ if (level > 0 && !prealloc_block.objectid) {
u32 size = b->len;
u64 hint = b->start;
@@ -1425,6 +1560,8 @@ again:
goto again;
}
+ btrfs_set_path_blocking(p);
+
wret = btrfs_cow_block(trans, root, b,
p->nodes[level + 1],
p->slots[level + 1],
@@ -1446,6 +1583,22 @@ cow_done:
if (!p->skip_locking)
p->locks[level] = 1;
+ btrfs_clear_path_blocking(p, NULL);
+
+ /*
+ * we have a lock on b and as long as we aren't changing
+ * the tree, there is no way for the items in b to change.
+ * It is safe to drop the lock on our parent before we
+ * go through the expensive btree search on b.
+ *
+ * If cow is true, then we might be changing slot zero,
+ * which may require changing the parent. So, we can't
+ * drop the lock until after we know which slot we're
+ * operating on.
+ */
+ if (!cow)
+ btrfs_unlock_up_safe(p, level + 1);
+
ret = check_block(root, p, level);
if (ret) {
ret = -1;
@@ -1453,6 +1606,7 @@ cow_done:
}
ret = bin_search(b, key, level, &slot);
+
if (level != 0) {
if (ret && slot > 0)
slot -= 1;
@@ -1460,7 +1614,16 @@ cow_done:
if ((p->search_for_split || ins_len > 0) &&
btrfs_header_nritems(b) >=
BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
- int sret = split_node(trans, root, p, level);
+ int sret;
+
+ sret = reada_for_balance(root, p, level);
+ if (sret)
+ goto again;
+
+ btrfs_set_path_blocking(p);
+ sret = split_node(trans, root, p, level);
+ btrfs_clear_path_blocking(p, NULL);
+
BUG_ON(sret > 0);
if (sret) {
ret = sret;
@@ -1468,9 +1631,19 @@ cow_done:
}
b = p->nodes[level];
slot = p->slots[level];
- } else if (ins_len < 0) {
- int sret = balance_level(trans, root, p,
- level);
+ } else if (ins_len < 0 &&
+ btrfs_header_nritems(b) <
+ BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
+ int sret;
+
+ sret = reada_for_balance(root, p, level);
+ if (sret)
+ goto again;
+
+ btrfs_set_path_blocking(p);
+ sret = balance_level(trans, root, p, level);
+ btrfs_clear_path_blocking(p, NULL);
+
if (sret) {
ret = sret;
goto done;
@@ -1504,7 +1677,7 @@ cow_done:
* of the btree by dropping locks before
* we read.
*/
- if (level > 1) {
+ if (level > 0) {
btrfs_release_path(NULL, p);
if (tmp)
free_extent_buffer(tmp);
@@ -1519,6 +1692,7 @@ cow_done:
free_extent_buffer(tmp);
goto again;
} else {
+ btrfs_set_path_blocking(p);
if (tmp)
free_extent_buffer(tmp);
if (should_reada)
@@ -1528,14 +1702,29 @@ cow_done:
b = read_node_slot(root, b, slot);
}
}
- if (!p->skip_locking)
- btrfs_tree_lock(b);
+ if (!p->skip_locking) {
+ int lret;
+
+ btrfs_clear_path_blocking(p, NULL);
+ lret = btrfs_try_spin_lock(b);
+
+ if (!lret) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_lock(b);
+ btrfs_clear_path_blocking(p, b);
+ }
+ }
} else {
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(root, b) < ins_len) {
- int sret = split_leaf(trans, root, key,
+ int sret;
+
+ btrfs_set_path_blocking(p);
+ sret = split_leaf(trans, root, key,
p, ins_len, ret == 0);
+ btrfs_clear_path_blocking(p, NULL);
+
BUG_ON(sret > 0);
if (sret) {
ret = sret;
@@ -1549,12 +1738,16 @@ cow_done:
}
ret = 1;
done:
+ /*
+ * we don't really know what they plan on doing with the path
+ * from here on, so for now just mark it as blocking
+ */
+ btrfs_set_path_blocking(p);
if (prealloc_block.objectid) {
btrfs_free_reserved_extent(root,
prealloc_block.objectid,
prealloc_block.offset);
}
-
return ret;
}
@@ -1578,6 +1771,8 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0);
BUG_ON(ret);
+ btrfs_set_lock_blocking(eb);
+
parent = eb;
while (1) {
level = btrfs_header_level(parent);
@@ -1602,6 +1797,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
eb = read_tree_block(root, bytenr, blocksize,
generation);
btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
}
/*
@@ -1626,6 +1822,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
eb = read_tree_block(root, bytenr, blocksize,
generation);
btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
}
ret = btrfs_cow_block(trans, root, eb, parent, slot,
@@ -2172,6 +2369,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
right = read_node_slot(root, upper, slot + 1);
btrfs_tree_lock(right);
+ btrfs_set_lock_blocking(right);
+
free_space = btrfs_leaf_free_space(root, right);
if (free_space < data_size)
goto out_unlock;
@@ -2367,6 +2566,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
left = read_node_slot(root, path->nodes[1], slot - 1);
btrfs_tree_lock(left);
+ btrfs_set_lock_blocking(left);
+
free_space = btrfs_leaf_free_space(root, left);
if (free_space < data_size) {
ret = 1;
@@ -2825,6 +3026,12 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
path->keep_locks = 0;
BUG_ON(ret);
+ /*
+ * make sure any changes to the path from split_leaf leave it
+ * in a blocking state
+ */
+ btrfs_set_path_blocking(path);
+
leaf = path->nodes[0];
BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
@@ -3354,6 +3561,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
BUG();
}
out:
+ btrfs_unlock_up_safe(path, 1);
return ret;
}
@@ -3441,15 +3649,22 @@ noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
{
int ret;
u64 root_gen = btrfs_header_generation(path->nodes[1]);
+ u64 parent_start = path->nodes[1]->start;
+ u64 parent_owner = btrfs_header_owner(path->nodes[1]);
ret = del_ptr(trans, root, path, 1, path->slots[1]);
if (ret)
return ret;
+ /*
+ * btrfs_free_extent is expensive, we want to make sure we
+ * aren't holding any locks when we call it
+ */
+ btrfs_unlock_up_safe(path, 0);
+
ret = btrfs_free_extent(trans, root, bytenr,
btrfs_level_size(root, 0),
- path->nodes[1]->start,
- btrfs_header_owner(path->nodes[1]),
+ parent_start, parent_owner,
root_gen, 0, 1);
return ret;
}
@@ -3721,6 +3936,7 @@ find_next_key:
*/
if (slot >= nritems) {
path->slots[level] = slot;
+ btrfs_set_path_blocking(path);
sret = btrfs_find_next_key(root, path, min_key, level,
cache_only, min_trans);
if (sret == 0) {
@@ -3738,16 +3954,20 @@ find_next_key:
unlock_up(path, level, 1);
goto out;
}
+ btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
btrfs_tree_lock(cur);
+
path->locks[level - 1] = 1;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1);
+ btrfs_clear_path_blocking(path, NULL);
}
out:
if (ret == 0)
memcpy(min_key, &found_key, sizeof(found_key));
+ btrfs_set_path_blocking(path);
return ret;
}
@@ -3843,6 +4063,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
if (ret < 0)
return ret;
+ btrfs_set_path_blocking(path);
nritems = btrfs_header_nritems(path->nodes[0]);
/*
* by releasing the path above we dropped all our locks. A balance
@@ -3873,6 +4094,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
free_extent_buffer(next);
}
+ /* the path was set to blocking above */
if (level == 1 && (path->locks[1] || path->skip_locking) &&
path->reada)
reada_for_search(root, path, level, slot, 0);
@@ -3881,6 +4103,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
if (!path->skip_locking) {
WARN_ON(!btrfs_tree_locked(c));
btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
}
break;
}
@@ -3897,12 +4120,15 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
path->locks[level] = 1;
if (!level)
break;
+
+ btrfs_set_path_blocking(path);
if (level == 1 && path->locks[1] && path->reada)
reada_for_search(root, path, level, slot, 0);
next = read_node_slot(root, next, 0);
if (!path->skip_locking) {
WARN_ON(!btrfs_tree_locked(path->nodes[level]));
btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
}
}
done:
@@ -3927,6 +4153,7 @@ int btrfs_previous_item(struct btrfs_root *root,
while (1) {
if (path->slots[0] == 0) {
+ btrfs_set_path_blocking(path);
ret = btrfs_prev_leaf(root, path);
if (ret != 0)
return ret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eee060f8811..766b31ae318 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -43,11 +43,7 @@ struct btrfs_ordered_sum;
#define BTRFS_ACL_NOT_CACHED ((void *)-1)
-#ifdef CONFIG_LOCKDEP
-# define BTRFS_MAX_LEVEL 7
-#else
-# define BTRFS_MAX_LEVEL 8
-#endif
+#define BTRFS_MAX_LEVEL 8
/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -454,17 +450,11 @@ struct btrfs_timespec {
__le32 nsec;
} __attribute__ ((__packed__));
-typedef enum {
+enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LAST = 2,
-} btrfs_compression_type;
-
-/* we don't understand any encryption methods right now */
-typedef enum {
- BTRFS_ENCRYPTION_NONE = 0,
- BTRFS_ENCRYPTION_LAST = 1,
-} btrfs_encryption_type;
+};
struct btrfs_inode_item {
/* nfs style generation number */
@@ -701,9 +691,7 @@ struct btrfs_fs_info {
struct btrfs_transaction *running_transaction;
wait_queue_head_t transaction_throttle;
wait_queue_head_t transaction_wait;
-
wait_queue_head_t async_submit_wait;
- wait_queue_head_t tree_log_wait;
struct btrfs_super_block super_copy;
struct btrfs_super_block super_for_commit;
@@ -711,7 +699,6 @@ struct btrfs_fs_info {
struct super_block *sb;
struct inode *btree_inode;
struct backing_dev_info bdi;
- spinlock_t hash_lock;
struct mutex trans_mutex;
struct mutex tree_log_mutex;
struct mutex transaction_kthread_mutex;
@@ -730,10 +717,6 @@ struct btrfs_fs_info {
atomic_t async_submit_draining;
atomic_t nr_async_bios;
atomic_t async_delalloc_pages;
- atomic_t tree_log_writers;
- atomic_t tree_log_commit;
- unsigned long tree_log_batch;
- u64 tree_log_transid;
/*
* this is used by the balancing code to wait for all the pending
@@ -833,7 +816,14 @@ struct btrfs_root {
struct kobject root_kobj;
struct completion kobj_unregister;
struct mutex objectid_mutex;
+
struct mutex log_mutex;
+ wait_queue_head_t log_writer_wait;
+ wait_queue_head_t log_commit_wait[2];
+ atomic_t log_writers;
+ atomic_t log_commit[2];
+ unsigned long log_transid;
+ unsigned long log_batch;
u64 objectid;
u64 last_trans;
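
These fields move the log commit state from fs_info (one global set) into each root, and the commit state is doubled: log_commit[2] and log_commit_wait[2] allow log transid N+1 to start filling while transid N is still being written out. The rewritten tree-log.c is not shown in this section, so the following is only a hedged sketch of how a writer would be expected to pick its slot from these names:

    int index = root->log_transid % 2;   /* active slot for this transid */

    if (atomic_read(&root->log_commit[index]))   /* commit in flight? */
        wait_event(root->log_commit_wait[index],
                   !atomic_read(&root->log_commit[index]));
    atomic_inc(&root->log_writers);      /* register as an active writer */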
@@ -1721,7 +1711,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
u64 empty_size);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- u64 bytenr, u32 blocksize);
+ u64 bytenr, u32 blocksize,
+ int level);
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 parent, u64 min_bytes,
@@ -1840,7 +1831,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
-void btrfs_init_path(struct btrfs_path *p);
+void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
+
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
int btrfs_del_leaf(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 81a313874ae..adda739a021 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -16,7 +16,6 @@
* Boston, MA 021110-1307, USA.
*/
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
@@ -76,6 +75,40 @@ struct async_submit_bio {
struct btrfs_work work;
};
+/* These are used to set the lockdep class on the extent buffer locks.
+ * The class is set by the readpage_end_io_hook after the buffer has
+ * passed csum validation but before the pages are unlocked.
+ *
+ * The lockdep class is also set by btrfs_init_new_buffer on freshly
+ * allocated blocks.
+ *
+ * The class is based on the level in the tree block, which allows lockdep
+ * to know that lower nodes nest inside the locks of higher nodes.
+ *
+ * We also add a check to make sure the highest level of the tree is
+ * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
+ * code needs updating as well.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# if BTRFS_MAX_LEVEL != 8
+# error
+# endif
+static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
+static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
+ /* leaf */
+ "btrfs-extent-00",
+ "btrfs-extent-01",
+ "btrfs-extent-02",
+ "btrfs-extent-03",
+ "btrfs-extent-04",
+ "btrfs-extent-05",
+ "btrfs-extent-06",
+ "btrfs-extent-07",
+ /* highest possible level */
+ "btrfs-extent-08",
+};
+#endif
+
/*
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
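
With a single lock class, taking a child's eb->lock while the parent's is held looks like recursive locking to lockdep, so every tree walk would warn. Keying the class by level encodes the legal order: a level-N lock may be taken while a level-(N+1) lock is held, never the reverse. A sketch of the nesting this makes acceptable:

    btrfs_tree_lock(parent);     /* class btrfs-extent-0(N+1) */
    child = read_node_slot(root, parent, slot);
    btrfs_tree_lock(child);      /* class btrfs-extent-0N: no warning */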
@@ -348,6 +381,15 @@ static int check_tree_block_fsid(struct btrfs_root *root,
return ret;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
+{
+ lockdep_set_class_and_name(&eb->lock,
+ &btrfs_eb_class[level],
+ btrfs_eb_name[level]);
+}
+#endif
+
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
@@ -393,6 +435,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
}
found_level = btrfs_header_level(eb);
+ btrfs_set_buffer_lockdep_class(eb, found_level);
+
ret = csum_tree_block(root, eb, 1);
if (ret)
ret = -EIO;
@@ -800,7 +844,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
if (ret == 0)
- buf->flags |= EXTENT_UPTODATE;
+ set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
else
WARN_ON(1);
return buf;
@@ -814,6 +858,10 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid) {
WARN_ON(!btrfs_tree_locked(buf));
+
+ /* ugh, clear_extent_buffer_dirty can be expensive */
+ btrfs_set_lock_blocking(buf);
+
clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
buf);
}
@@ -850,6 +898,14 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
spin_lock_init(&root->list_lock);
mutex_init(&root->objectid_mutex);
mutex_init(&root->log_mutex);
+ init_waitqueue_head(&root->log_writer_wait);
+ init_waitqueue_head(&root->log_commit_wait[0]);
+ init_waitqueue_head(&root->log_commit_wait[1]);
+ atomic_set(&root->log_commit[0], 0);
+ atomic_set(&root->log_commit[1], 0);
+ atomic_set(&root->log_writers, 0);
+ root->log_batch = 0;
+ root->log_transid = 0;
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -934,15 +990,16 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
struct btrfs_root *tree_root = fs_info->tree_root;
+ struct extent_buffer *leaf;
root = kzalloc(sizeof(*root), GFP_NOFS);
if (!root)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
__setup_root(tree_root->nodesize, tree_root->leafsize,
tree_root->sectorsize, tree_root->stripesize,
@@ -951,12 +1008,23 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
+ /*
+ * log trees do not get reference counted because they go away
+ * before a real commit is actually done. They do store pointers
+ * to file data extents, and those reference counts still get
+ * updated (along with back refs to the log tree).
+ */
root->ref_cows = 0;
- root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
- 0, BTRFS_TREE_LOG_OBJECTID,
- trans->transid, 0, 0, 0);
+ leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
+ 0, BTRFS_TREE_LOG_OBJECTID,
+ trans->transid, 0, 0, 0);
+ if (IS_ERR(leaf)) {
+ kfree(root);
+ return ERR_CAST(leaf);
+ }
+ root->node = leaf;
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
btrfs_set_header_bytenr(root->node, root->node->start);
@@ -968,7 +1036,48 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
BTRFS_FSID_SIZE);
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
- fs_info->log_root_tree = root;
+ return root;
+}
+
+int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *log_root;
+
+ log_root = alloc_log_tree(trans, fs_info);
+ if (IS_ERR(log_root))
+ return PTR_ERR(log_root);
+ WARN_ON(fs_info->log_root_tree);
+ fs_info->log_root_tree = log_root;
+ return 0;
+}
+
+int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_root *log_root;
+ struct btrfs_inode_item *inode_item;
+
+ log_root = alloc_log_tree(trans, root->fs_info);
+ if (IS_ERR(log_root))
+ return PTR_ERR(log_root);
+
+ log_root->last_trans = trans->transid;
+ log_root->root_key.offset = root->root_key.objectid;
+
+ inode_item = &log_root->root_item.inode;
+ inode_item->generation = cpu_to_le64(1);
+ inode_item->size = cpu_to_le64(3);
+ inode_item->nlink = cpu_to_le32(1);
+ inode_item->nbytes = cpu_to_le64(root->leafsize);
+ inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
+
+ btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
+ btrfs_set_root_generation(&log_root->root_item, trans->transid);
+
+ WARN_ON(root->log_root);
+ root->log_root = log_root;
+ root->log_transid = 0;
return 0;
}
@@ -1136,7 +1245,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
int ret = 0;
- struct list_head *cur;
struct btrfs_device *device;
struct backing_dev_info *bdi;
#if 0
@@ -1144,8 +1252,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
btrfs_congested_async(info, 0))
return 1;
#endif
- list_for_each(cur, &info->fs_devices->devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
if (!device->bdev)
continue;
bdi = blk_get_backing_dev_info(device->bdev);
@@ -1163,13 +1270,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
*/
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
- struct list_head *cur;
struct btrfs_device *device;
struct btrfs_fs_info *info;
info = (struct btrfs_fs_info *)bdi->unplug_io_data;
- list_for_each(cur, &info->fs_devices->devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
if (!device->bdev)
continue;
@@ -1447,7 +1552,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->hashers);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
- spin_lock_init(&fs_info->hash_lock);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->new_trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
@@ -1535,10 +1639,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_throttle);
init_waitqueue_head(&fs_info->transaction_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
- init_waitqueue_head(&fs_info->tree_log_wait);
- atomic_set(&fs_info->tree_log_commit, 0);
- atomic_set(&fs_info->tree_log_writers, 0);
- fs_info->tree_log_transid = 0;
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -1627,6 +1727,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
* low idle thresh
*/
fs_info->endio_workers.idle_thresh = 4;
+ fs_info->endio_meta_workers.idle_thresh = 4;
+
fs_info->endio_write_workers.idle_thresh = 64;
fs_info->endio_meta_write_workers.idle_thresh = 64;
@@ -1720,7 +1822,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_DEV_TREE_OBJECTID, dev_root);
dev_root->track_dirty = 1;
-
if (ret)
goto fail_extent_root;
@@ -1740,13 +1841,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
- if (!fs_info->cleaner_kthread)
+ if (IS_ERR(fs_info->cleaner_kthread))
goto fail_csum_root;
fs_info->transaction_kthread = kthread_run(transaction_kthread,
tree_root,
"btrfs-transaction");
- if (!fs_info->transaction_kthread)
+ if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
if (btrfs_super_log_root(disk_super) != 0) {
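
The kthread checks above are corrected because kthread_run() never returns NULL: on failure it returns an ERR_PTR() encoded error (typically -ENOMEM), so the old !ptr tests could never fire. The general pattern, with my_thread_fn and arg as placeholders:

    struct task_struct *task;

    task = kthread_run(my_thread_fn, arg, "my-thread");
    if (IS_ERR(task)) {
        ret = PTR_ERR(task);     /* propagate the real error code */
        goto fail;
    }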
@@ -1828,13 +1929,14 @@ fail_sb_buffer:
fail_iput:
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
-fail:
+
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ bdi_destroy(&fs_info->bdi);
+fail:
kfree(extent_root);
kfree(tree_root);
- bdi_destroy(&fs_info->bdi);
kfree(fs_info);
kfree(chunk_root);
kfree(dev_root);
@@ -1995,7 +2097,6 @@ static int write_dev_supers(struct btrfs_device *device,
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
- struct list_head *cur;
struct list_head *head = &root->fs_info->fs_devices->devices;
struct btrfs_device *dev;
struct btrfs_super_block *sb;
@@ -2011,8 +2112,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
sb = &root->fs_info->super_for_commit;
dev_item = &sb->dev_item;
- list_for_each(cur, head) {
- dev = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
continue;
@@ -2045,8 +2145,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
}
total_errors = 0;
- list_for_each(cur, head) {
- dev = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev)
continue;
if (!dev->in_fs_metadata || !dev->writeable)
@@ -2260,6 +2359,8 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
u64 transid = btrfs_header_generation(buf);
struct inode *btree_inode = root->fs_info->btree_inode;
+ btrfs_set_lock_blocking(buf);
+
WARN_ON(!btrfs_tree_locked(buf));
if (transid != root->fs_info->generation) {
printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
@@ -2302,14 +2403,13 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
int ret;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
if (ret == 0)
- buf->flags |= EXTENT_UPTODATE;
+ set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
return ret;
}
int btree_lock_page_hook(struct page *page)
{
struct inode *inode = page->mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_buffer *eb;
unsigned long len;
@@ -2324,9 +2424,7 @@ int btree_lock_page_hook(struct page *page)
goto out;
btrfs_tree_lock(eb);
- spin_lock(&root->fs_info->hash_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
- spin_unlock(&root->fs_info->hash_lock);
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
out:
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c0ff404c31b..95029db227b 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -98,5 +98,17 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btree_lock_page_hook(struct page *page);
+
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
+#else
+static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
+ int level)
+{
+}
+#endif
#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 293da650873..0a5d796c9f7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -19,7 +19,7 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/version.h>
+#include <linux/sort.h>
#include "compat.h"
#include "hash.h"
#include "crc32c.h"
@@ -30,7 +30,6 @@
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"
-#include "compat.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
@@ -326,10 +325,8 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
struct list_head *head = &info->space_info;
- struct list_head *cur;
struct btrfs_space_info *found;
- list_for_each(cur, head) {
- found = list_entry(cur, struct btrfs_space_info, list);
+ list_for_each_entry(found, head, list) {
if (found->flags == flags)
return found;
}
@@ -1326,8 +1323,25 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- finish_current_insert(trans, root->fs_info->extent_root, 1);
- del_pending_extents(trans, root->fs_info->extent_root, 1);
+ u64 start;
+ u64 end;
+ int ret;
+
+ while (1) {
+ finish_current_insert(trans, root->fs_info->extent_root, 1);
+ del_pending_extents(trans, root->fs_info->extent_root, 1);
+
+ /* is there more work to do? */
+ ret = find_first_extent_bit(&root->fs_info->pending_del,
+ 0, &start, &end, EXTENT_WRITEBACK);
+ if (!ret)
+ continue;
+ ret = find_first_extent_bit(&root->fs_info->extent_ins,
+ 0, &start, &end, EXTENT_WRITEBACK);
+ if (!ret)
+ continue;
+ break;
+ }
return 0;
}
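
btrfs_extent_post_op() has to loop because the two phases feed each other: inserting extent records can COW btree blocks, which queues new pending deletes, and processing deletes can queue new inserts. The loop exits only when neither the pending_del nor the extent_ins tree has an EXTENT_WRITEBACK range left, i.e. both find_first_extent_bit() calls return nonzero. The shape is a drain-until-stable loop; the helper names below are placeholders for the finish_current_insert()/del_pending_extents() pair:

    for (;;) {
        process_inserts();       /* may queue new deletes */
        process_deletes();       /* may queue new inserts */
        if (no_pending_inserts() && no_pending_deletes())
            break;               /* nothing fed back, we're done */
    }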
@@ -1525,15 +1539,55 @@ out:
return ret;
}
-int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *orig_buf, struct extent_buffer *buf,
- u32 *nr_extents)
+/* when a block goes through cow, we update the reference counts of
+ * everything that block points to. The internal pointers of the block
+ * can be in just about any order, and it is likely to have clusters of
+ * things that are close together and clusters of things that are not.
+ *
+ * To help reduce the seeks that come with updating all of these reference
+ * counts, sort them by byte number before actual updates are done.
+ *
+ * struct refsort is used to match byte number to slot in the btree block.
+ * we sort based on the byte number and then use the slot to actually
+ * find the item.
+ *
+ * struct refsort is smaller than struct btrfs_item and smaller than
+ * struct btrfs_key_ptr. Since we're currently limited to the page size
+ * for a btree block, there's no way for a kmalloc of refsorts for a
+ * single node to be bigger than a page.
+ */
+struct refsort {
+ u64 bytenr;
+ u32 slot;
+};
+
+/*
+ * for passing into sort()
+ */
+static int refsort_cmp(const void *a_void, const void *b_void)
+{
+ const struct refsort *a = a_void;
+ const struct refsort *b = b_void;
+
+ if (a->bytenr < b->bytenr)
+ return -1;
+ if (a->bytenr > b->bytenr)
+ return 1;
+ return 0;
+}
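
refsort_cmp() returns an explicit -1/0/1 instead of the tempting a->bytenr - b->bytenr: that subtraction would be truncated from u64 to int and could report the wrong order for byte numbers more than 2 GiB apart. Every user in this patch follows the same two steps, sort then walk in bytenr order; update_one_ref() below is a placeholder:

    sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
    for (i = 0; i < refi; i++)
        update_one_ref(sorted[i].bytenr, sorted[i].slot);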
+
+
+noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *orig_buf,
+ struct extent_buffer *buf, u32 *nr_extents)
{
u64 bytenr;
u64 ref_root;
u64 orig_root;
u64 ref_generation;
u64 orig_generation;
+ struct refsort *sorted;
u32 nritems;
u32 nr_file_extents = 0;
struct btrfs_key key;
@@ -1542,6 +1596,8 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int level;
int ret = 0;
int faili = 0;
+ int refi = 0;
+ int slot;
int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
u64, u64, u64, u64, u64, u64, u64, u64);
@@ -1553,6 +1609,9 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
+ sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
+ BUG_ON(!sorted);
+
if (root->ref_cows) {
process_func = __btrfs_inc_extent_ref;
} else {
@@ -1565,6 +1624,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
process_func = __btrfs_update_extent_ref;
}
+ /*
+ * we make two passes through the items. In the first pass we
+ * only record the byte number and slot. Then we sort based on
+ * byte number and do the actual work based on the sorted results
+ */
for (i = 0; i < nritems; i++) {
cond_resched();
if (level == 0) {
@@ -1581,6 +1645,32 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
continue;
nr_file_extents++;
+ sorted[refi].bytenr = bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ } else {
+ bytenr = btrfs_node_blockptr(buf, i);
+ sorted[refi].bytenr = bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ }
+ }
+ /*
+ * if refi == 0, we didn't actually put anything into the sorted
+ * array and we're done
+ */
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ for (i = 0; i < refi; i++) {
+ cond_resched();
+ slot = sorted[i].slot;
+ bytenr = sorted[i].bytenr;
+
+ if (level == 0) {
+ btrfs_item_key_to_cpu(buf, &key, slot);
ret = process_func(trans, root, bytenr,
orig_buf->start, buf->start,
@@ -1589,25 +1679,25 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
key.objectid);
if (ret) {
- faili = i;
+ faili = slot;
WARN_ON(1);
goto fail;
}
} else {
- bytenr = btrfs_node_blockptr(buf, i);
ret = process_func(trans, root, bytenr,
orig_buf->start, buf->start,
orig_root, ref_root,
orig_generation, ref_generation,
level - 1);
if (ret) {
- faili = i;
+ faili = slot;
WARN_ON(1);
goto fail;
}
}
}
out:
+ kfree(sorted);
if (nr_extents) {
if (level == 0)
*nr_extents = nr_file_extents;
@@ -1616,6 +1706,7 @@ out:
}
return 0;
fail:
+ kfree(sorted);
WARN_ON(1);
return ret;
}
@@ -2137,13 +2228,12 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
u64 end;
u64 priv;
u64 search = 0;
- u64 skipped = 0;
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_path *path;
struct pending_extent_op *extent_op, *tmp;
struct list_head insert_list, update_list;
int ret;
- int num_inserts = 0, max_inserts;
+ int num_inserts = 0, max_inserts, restart = 0;
path = btrfs_alloc_path();
INIT_LIST_HEAD(&insert_list);
@@ -2159,18 +2249,19 @@ again:
ret = find_first_extent_bit(&info->extent_ins, search, &start,
&end, EXTENT_WRITEBACK);
if (ret) {
- if (skipped && all && !num_inserts) {
- skipped = 0;
+ if (restart && !num_inserts &&
+ list_empty(&update_list)) {
+ restart = 0;
search = 0;
continue;
}
- mutex_unlock(&info->extent_ins_mutex);
break;
}
ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
if (!ret) {
- skipped = 1;
+ if (all)
+ restart = 1;
search = end + 1;
if (need_resched()) {
mutex_unlock(&info->extent_ins_mutex);
@@ -2189,7 +2280,7 @@ again:
list_add_tail(&extent_op->list, &insert_list);
search = end + 1;
if (num_inserts == max_inserts) {
- mutex_unlock(&info->extent_ins_mutex);
+ restart = 1;
break;
}
} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
@@ -2205,7 +2296,6 @@ again:
* somebody marked this thing for deletion then just unlock it and be
* done, the free_extents will handle it
*/
- mutex_lock(&info->extent_ins_mutex);
list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
clear_extent_bits(&info->extent_ins, extent_op->bytenr,
extent_op->bytenr + extent_op->num_bytes - 1,
@@ -2227,6 +2317,10 @@ again:
if (!list_empty(&update_list)) {
ret = update_backrefs(trans, extent_root, path, &update_list);
BUG_ON(ret);
+
+ /* we may have COW'ed new blocks, so lets start over */
+ if (all)
+ restart = 1;
}
/*
@@ -2234,9 +2328,9 @@ again:
* need to make sure everything is cleaned then reset everything and
* go back to the beginning
*/
- if (!num_inserts && all && skipped) {
+ if (!num_inserts && restart) {
search = 0;
- skipped = 0;
+ restart = 0;
INIT_LIST_HEAD(&update_list);
INIT_LIST_HEAD(&insert_list);
goto again;
@@ -2293,27 +2387,19 @@ again:
BUG_ON(ret);
/*
- * if we broke out of the loop in order to insert stuff because we hit
- * the maximum number of inserts at a time we can handle, then loop
- * back and pick up where we left off
- */
- if (num_inserts == max_inserts) {
- INIT_LIST_HEAD(&insert_list);
- INIT_LIST_HEAD(&update_list);
- num_inserts = 0;
- goto again;
- }
-
- /*
- * again, if we need to make absolutely sure there are no more pending
- * extent operations left and we know that we skipped some, go back to
- * the beginning and do it all again
+ * if restart is set for whatever reason we need to go back and start
+ * searching through the pending list again.
+ *
+ * We just inserted some extents, which could have resulted in new
+ * blocks being allocated, which would result in new blocks needing
+ * updates, so if all is set we _must_ restart to get the updated
+ * blocks.
*/
- if (all && skipped) {
+ if (restart || all) {
INIT_LIST_HEAD(&insert_list);
INIT_LIST_HEAD(&update_list);
search = 0;
- skipped = 0;
+ restart = 0;
num_inserts = 0;
goto again;
}
@@ -2547,6 +2633,7 @@ again:
if (ret) {
if (all && skipped && !nr) {
search = 0;
+ skipped = 0;
continue;
}
mutex_unlock(&info->extent_ins_mutex);
@@ -2633,6 +2720,8 @@ again:
goto again;
}
+ if (!err)
+ finish_current_insert(trans, extent_root, 0);
return err;
}
@@ -2700,13 +2789,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
/* if metadata always pin */
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_block_group_cache *cache;
-
- /* btrfs_free_reserved_extent */
- cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache);
- btrfs_add_free_space(cache, bytenr, num_bytes);
- put_block_group(cache);
+ mutex_lock(&root->fs_info->pinned_mutex);
+ btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
+ mutex_unlock(&root->fs_info->pinned_mutex);
update_reserved_extents(root, bytenr, num_bytes, 0);
return 0;
}
@@ -2787,7 +2872,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
if (data & BTRFS_BLOCK_GROUP_METADATA) {
last_ptr = &root->fs_info->last_alloc;
- empty_cluster = 64 * 1024;
+ if (!btrfs_test_opt(root, SSD))
+ empty_cluster = 64 * 1024;
}
if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
@@ -3014,7 +3100,6 @@ loop_check:
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
{
struct btrfs_block_group_cache *cache;
- struct list_head *l;
printk(KERN_INFO "space_info has %llu free, is %sfull\n",
(unsigned long long)(info->total_bytes - info->bytes_used -
@@ -3022,8 +3107,7 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
(info->full) ? "" : "not ");
down_read(&info->groups_sem);
- list_for_each(l, &info->block_groups) {
- cache = list_entry(l, struct btrfs_block_group_cache, list);
+ list_for_each_entry(cache, &info->block_groups, list) {
spin_lock(&cache->lock);
printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
"%llu pinned %llu reserved\n",
@@ -3332,7 +3416,8 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- u64 bytenr, u32 blocksize)
+ u64 bytenr, u32 blocksize,
+ int level)
{
struct extent_buffer *buf;
@@ -3340,9 +3425,13 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
if (!buf)
return ERR_PTR(-ENOMEM);
btrfs_set_header_generation(buf, trans->transid);
+ btrfs_set_buffer_lockdep_class(buf, level);
btrfs_tree_lock(buf);
clean_tree_block(trans, root, buf);
+
+ btrfs_set_lock_blocking(buf);
btrfs_set_buffer_uptodate(buf);
+
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
set_extent_dirty(&root->dirty_log_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
@@ -3351,6 +3440,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
buf->start + buf->len - 1, GFP_NOFS);
}
trans->blocks_used++;
+ /* this returns a buffer locked for blocking */
return buf;
}
@@ -3379,7 +3469,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
}
- buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
+ buf = btrfs_init_new_buffer(trans, root, ins.objectid,
+ blocksize, level);
return buf;
}
@@ -3388,36 +3479,73 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
{
u64 leaf_owner;
u64 leaf_generation;
+ struct refsort *sorted;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
int i;
int nritems;
int ret;
+ int refi = 0;
+ int slot;
BUG_ON(!btrfs_is_leaf(leaf));
nritems = btrfs_header_nritems(leaf);
leaf_owner = btrfs_header_owner(leaf);
leaf_generation = btrfs_header_generation(leaf);
+ sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+ /* we do this loop twice. The first time we build a list
+ * of the extents we have a reference on, then we sort the list
+ * by bytenr. The second time around we actually do the
+ * extent freeing.
+ */
for (i = 0; i < nritems; i++) {
u64 disk_bytenr;
cond_resched();
btrfs_item_key_to_cpu(leaf, &key, i);
+
+ /* only extents have references, skip everything else */
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
continue;
+
fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+
+ /* inline extents live in the btree, they don't have refs */
if (btrfs_file_extent_type(leaf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
- /*
- * FIXME make sure to insert a trans record that
- * repeats the snapshot del on crash
- */
+
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+
+ /* holes don't have refs */
if (disk_bytenr == 0)
continue;
+ sorted[refi].bytenr = disk_bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ }
+
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ for (i = 0; i < refi; i++) {
+ u64 disk_bytenr;
+
+ disk_bytenr = sorted[i].bytenr;
+ slot = sorted[i].slot;
+
+ cond_resched();
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
ret = __btrfs_free_extent(trans, root, disk_bytenr,
btrfs_file_extent_disk_num_bytes(leaf, fi),
leaf->start, leaf_owner, leaf_generation,
@@ -3428,6 +3556,8 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
wake_up(&root->fs_info->transaction_throttle);
cond_resched();
}
+out:
+ kfree(sorted);
return 0;
}
@@ -3437,9 +3567,25 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
{
int i;
int ret;
- struct btrfs_extent_info *info = ref->extents;
+ struct btrfs_extent_info *info;
+ struct refsort *sorted;
+
+ if (ref->nritems == 0)
+ return 0;
+
+ sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
+ for (i = 0; i < ref->nritems; i++) {
+ sorted[i].bytenr = ref->extents[i].bytenr;
+ sorted[i].slot = i;
+ }
+ sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
+ /*
+ * the items in the ref were sorted when the ref was inserted
+ * into the ref cache, so this is already in order
+ */
for (i = 0; i < ref->nritems; i++) {
+ info = ref->extents + sorted[i].slot;
ret = __btrfs_free_extent(trans, root, info->bytenr,
info->num_bytes, ref->bytenr,
ref->owner, ref->generation,
@@ -3453,6 +3599,7 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
info++;
}
+ kfree(sorted);
return 0;
}
@@ -3497,6 +3644,152 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
}
/*
+ * this is used while deleting old snapshots, and it drops the refs
+ * on a whole subtree starting from a level 1 node.
+ *
+ * The idea is to sort all the leaf pointers, and then drop the
+ * ref on all the leaves in order. Most of the time the leaves
+ * will have ref cache entries, so no leaf IOs will be required to
+ * find the extents they have references on.
+ *
+ * For each leaf, any references it has are also dropped in order.
+ *
+ * This ends up dropping the references in something close to optimal
+ * order for reading and modifying the extent allocation tree.
+ */
+static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path)
+{
+ u64 bytenr;
+ u64 root_owner;
+ u64 root_gen;
+ struct extent_buffer *eb = path->nodes[1];
+ struct extent_buffer *leaf;
+ struct btrfs_leaf_ref *ref;
+ struct refsort *sorted = NULL;
+ int nritems = btrfs_header_nritems(eb);
+ int ret;
+ int i;
+ int refi = 0;
+ int slot = path->slots[1];
+ u32 blocksize = btrfs_level_size(root, 0);
+ u32 refs;
+
+ if (nritems == 0)
+ goto out;
+
+ root_owner = btrfs_header_owner(eb);
+ root_gen = btrfs_header_generation(eb);
+ sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+
+ /*
+ * step one, sort all the leaf pointers so we don't scribble
+ * randomly into the extent allocation tree
+ */
+ for (i = slot; i < nritems; i++) {
+ sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
+ sorted[refi].slot = i;
+ refi++;
+ }
+
+ /*
+ * nritems won't be zero, but if we're picking up drop_snapshot
+ * after a crash, slot might be > 0, so double check things
+ * just in case.
+ */
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ /*
+ * the first loop frees everything the leaves point to
+ */
+ for (i = 0; i < refi; i++) {
+ u64 ptr_gen;
+
+ bytenr = sorted[i].bytenr;
+
+ /*
+ * check the reference count on this leaf. If it is > 1
+ * we just decrement it below and don't update any
+ * of the refs the leaf points to.
+ */
+ ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
+ BUG_ON(ret);
+ if (refs != 1)
+ continue;
+
+ ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
+
+ /*
+ * the leaf only had one reference, which means the
+ * only thing pointing to this leaf is the snapshot
+ * we're deleting. It isn't possible for the reference
+ * count to increase again later
+ *
+ * The reference cache is checked for the leaf,
+ * and if found we'll be able to drop any refs held by
+ * the leaf without needing to read it in.
+ */
+ ref = btrfs_lookup_leaf_ref(root, bytenr);
+ if (ref && ref->generation != ptr_gen) {
+ btrfs_free_leaf_ref(root, ref);
+ ref = NULL;
+ }
+ if (ref) {
+ ret = cache_drop_leaf_ref(trans, root, ref);
+ BUG_ON(ret);
+ btrfs_remove_leaf_ref(root, ref);
+ btrfs_free_leaf_ref(root, ref);
+ } else {
+ /*
+ * the leaf wasn't in the reference cache, so
+ * we have to read it.
+ */
+ leaf = read_tree_block(root, bytenr, blocksize,
+ ptr_gen);
+ ret = btrfs_drop_leaf_ref(trans, root, leaf);
+ BUG_ON(ret);
+ free_extent_buffer(leaf);
+ }
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+ }
+
+ /*
+ * run through the loop again to free the refs on the leaves.
+ * This is faster than doing it in the loop above because
+ * the leaves are likely to be clustered together. We end up
+ * working in nice chunks on the extent allocation tree.
+ */
+ for (i = 0; i < refi; i++) {
+ bytenr = sorted[i].bytenr;
+ ret = __btrfs_free_extent(trans, root, bytenr,
+ blocksize, eb->start,
+ root_owner, root_gen, 0, 1);
+ BUG_ON(ret);
+
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+ }
+out:
+ kfree(sorted);
+
+ /*
+ * update the path to show we've processed the entire level 1
+ * node. This will get saved into the root's drop_snapshot_progress
+ * field so these drops are not repeated again if this transaction
+ * commits.
+ */
+ path->slots[1] = nritems;
+ return 0;
+}
+
+/*
* helper function for drop_snapshot, this walks down the tree dropping ref
* counts as it goes.
*/
@@ -3511,7 +3804,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct extent_buffer *next;
struct extent_buffer *cur;
struct extent_buffer *parent;
- struct btrfs_leaf_ref *ref;
u32 blocksize;
int ret;
u32 refs;
@@ -3538,17 +3830,46 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
if (path->slots[*level] >=
btrfs_header_nritems(cur))
break;
+
+ /* the new code goes down to level 1 and does all the
+ * leaves pointed to by that node in bulk. So, this check
+ * for level 0 will always be false.
+ *
+ * But, the disk format allows the drop_snapshot_progress
+ * field in the root to leave things in a state where
+ * a leaf will need cleaning up here. If someone crashes
+ * with the old code and then boots with the new code,
+ * we might find a leaf here.
+ */
if (*level == 0) {
ret = btrfs_drop_leaf_ref(trans, root, cur);
BUG_ON(ret);
break;
}
+
+ /*
+ * once we get to level one, process the whole node
+ * at once, including everything below it.
+ */
+ if (*level == 1) {
+ ret = drop_level_one_refs(trans, root, path);
+ BUG_ON(ret);
+ break;
+ }
+
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
blocksize = btrfs_level_size(root, *level - 1);
ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
BUG_ON(ret);
+
+ /*
+ * if there is more than one reference, we don't need
+ * to read that node to drop any references it has. We
+ * just drop the ref we hold on that node and move on to the
+ * next slot in this level.
+ */
if (refs != 1) {
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
@@ -3567,46 +3888,12 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
continue;
}
+
/*
- * at this point, we have a single ref, and since the
- * only place referencing this extent is a dead root
- * the reference count should never go higher.
- * So, we don't need to check it again
+ * we need to keep freeing things in the next level down.
+ * read the block and loop around to process it
*/
- if (*level == 1) {
- ref = btrfs_lookup_leaf_ref(root, bytenr);
- if (ref && ref->generation != ptr_gen) {
- btrfs_free_leaf_ref(root, ref);
- ref = NULL;
- }
- if (ref) {
- ret = cache_drop_leaf_ref(trans, root, ref);
- BUG_ON(ret);
- btrfs_remove_leaf_ref(root, ref);
- btrfs_free_leaf_ref(root, ref);
- *level = 0;
- break;
- }
- }
- next = btrfs_find_tree_block(root, bytenr, blocksize);
- if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
- free_extent_buffer(next);
-
- next = read_tree_block(root, bytenr, blocksize,
- ptr_gen);
- cond_resched();
-#if 0
- /*
- * this is a debugging check and can go away
- * the ref should never go all the way down to 1
- * at this point
- */
- ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
- &refs);
- BUG_ON(ret);
- WARN_ON(refs != 1);
-#endif
- }
+ next = read_tree_block(root, bytenr, blocksize, ptr_gen);
WARN_ON(*level <= 0);
if (path->nodes[*level-1])
free_extent_buffer(path->nodes[*level-1]);
@@ -3631,11 +3918,16 @@ out:
root_owner = btrfs_header_owner(parent);
root_gen = btrfs_header_generation(parent);
+ /*
+ * cleanup and free the reference on the last node
+ * we processed
+ */
ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
parent->start, root_owner, root_gen,
*level, 1);
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
+
*level += 1;
BUG_ON(ret);
@@ -3687,6 +3979,7 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
next = read_tree_block(root, bytenr, blocksize, ptr_gen);
btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
&refs);
@@ -3754,6 +4047,13 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
struct extent_buffer *node;
struct btrfs_disk_key disk_key;
+
+ /*
+ * there is more work to do in this level.
+ * Update the drop_progress marker to reflect
+ * the work we've done so far, and then bump
+ * the slot number
+ */
node = path->nodes[i];
path->slots[i]++;
*level = i;
@@ -3765,6 +4065,11 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
return 0;
} else {
struct extent_buffer *parent;
+
+ /*
+ * this whole node is done, free our reference
+ * on it and go up one level
+ */
if (path->nodes[*level] == root->node)
parent = path->nodes[*level];
else
@@ -4444,7 +4749,7 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
u64 lock_end = 0;
u64 num_bytes;
u64 ext_offset;
- u64 first_pos;
+ u64 search_end = (u64)-1;
u32 nritems;
int nr_scaned = 0;
int extent_locked = 0;
@@ -4452,7 +4757,6 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
int ret;
memcpy(&key, leaf_key, sizeof(key));
- first_pos = INT_LIMIT(loff_t) - extent_key->offset;
if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
if (key.objectid < ref_path->owner_objectid ||
(key.objectid == ref_path->owner_objectid &&
@@ -4501,7 +4805,7 @@ next:
if ((key.objectid > ref_path->owner_objectid) ||
(key.objectid == ref_path->owner_objectid &&
key.type > BTRFS_EXTENT_DATA_KEY) ||
- (key.offset >= first_pos + extent_key->offset))
+ key.offset >= search_end)
break;
}
@@ -4534,8 +4838,10 @@ next:
num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
ext_offset = btrfs_file_extent_offset(leaf, fi);
- if (first_pos > key.offset - ext_offset)
- first_pos = key.offset - ext_offset;
+ if (search_end == (u64)-1) {
+ search_end = key.offset - ext_offset +
+ btrfs_file_extent_ram_bytes(leaf, fi);
+ }
if (!extent_locked) {
lock_start = key.offset;
@@ -4724,7 +5030,7 @@ next:
}
skip:
if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
- key.offset >= first_pos + extent_key->offset)
+ key.offset >= search_end)
break;
cond_resched();
@@ -4778,6 +5084,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
ref->bytenr = buf->start;
ref->owner = btrfs_header_owner(buf);
ref->generation = btrfs_header_generation(buf);
+
ret = btrfs_add_leaf_ref(root, ref, 0);
WARN_ON(ret);
btrfs_free_leaf_ref(root, ref);
@@ -5351,7 +5658,9 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
prev_block = block_start;
}
+ mutex_lock(&extent_root->fs_info->trans_mutex);
btrfs_record_root_in_trans(found_root);
+ mutex_unlock(&extent_root->fs_info->trans_mutex);
if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
/*
* try to update data extent references while
@@ -5957,9 +6266,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
BUG_ON(!path);
- btrfs_remove_free_space_cache(block_group);
+ spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ btrfs_remove_free_space_cache(block_group);
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
up_write(&block_group->space_info->groups_sem);
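
The extent-tree.c hunks above repeatedly build an array of struct refsort
entries, one (bytenr, slot) pair per reference, and hand refsort_cmp to
sort() before doing the frees. Those definitions sit earlier in
extent-tree.c, outside this excerpt; a minimal sketch of the shape the
call sites imply (field names are taken from the usage above, everything
else is an assumption):

struct refsort {
        u64 bytenr;
        u32 slot;
};

/*
 * comparator for sort(): order refs by disk bytenr so the frees walk
 * the extent allocation tree mostly sequentially instead of scribbling
 * into it at random
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
        const struct refsort *a = a_void;
        const struct refsort *b = b_void;

        if (a->bytenr < b->bytenr)
                return -1;
        if (a->bytenr > b->bytenr)
                return 1;
        return 0;
}

With that comparator, sort(sorted, refi, sizeof(struct refsort),
refsort_cmp, NULL) from linux/sort.h leaves the array ordered by bytenr,
which is the property both free loops depend on.
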
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e086d407f1f..ebe6b29e606 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
-#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
@@ -31,7 +30,7 @@ static LIST_HEAD(buffers);
static LIST_HEAD(states);
#define LEAK_DEBUG 0
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif
@@ -120,7 +119,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
static struct extent_state *alloc_extent_state(gfp_t mask)
{
struct extent_state *state;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
unsigned long flags;
#endif
@@ -130,7 +129,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
state->state = 0;
state->private = 0;
state->tree = NULL;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
spin_lock_irqsave(&leak_lock, flags);
list_add(&state->leak_list, &states);
spin_unlock_irqrestore(&leak_lock, flags);
@@ -145,11 +144,11 @@ static void free_extent_state(struct extent_state *state)
if (!state)
return;
if (atomic_dec_and_test(&state->refs)) {
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
unsigned long flags;
#endif
WARN_ON(state->tree);
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
spin_lock_irqsave(&leak_lock, flags);
list_del(&state->leak_list);
spin_unlock_irqrestore(&leak_lock, flags);
@@ -416,8 +415,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
if (node) {
- struct extent_state *found;
- found = rb_entry(node, struct extent_state, rb_node);
free_extent_state(prealloc);
return -EEXIST;
}
@@ -2378,11 +2375,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
int scanned = 0;
int range_whole = 0;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- return 0;
- }
-
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
@@ -2855,6 +2847,98 @@ out:
return sector;
}
+int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len, get_extent_t *get_extent)
+{
+ int ret;
+ u64 off = start;
+ u64 max = start + len;
+ u32 flags = 0;
+ u64 disko = 0;
+ struct extent_map *em = NULL;
+ int end = 0;
+ u64 em_start = 0, em_len = 0;
+ unsigned long emflags;
+ ret = 0;
+
+ if (len == 0)
+ return -EINVAL;
+
+ lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
+ GFP_NOFS);
+ em = get_extent(inode, NULL, 0, off, max - off, 0);
+ if (!em)
+ goto out;
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ while (!end) {
+ off = em->start + em->len;
+ if (off >= max)
+ end = 1;
+
+ em_start = em->start;
+ em_len = em->len;
+
+ disko = 0;
+ flags = 0;
+
+ switch (em->block_start) {
+ case EXTENT_MAP_LAST_BYTE:
+ end = 1;
+ flags |= FIEMAP_EXTENT_LAST;
+ break;
+ case EXTENT_MAP_HOLE:
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
+ break;
+ case EXTENT_MAP_INLINE:
+ flags |= (FIEMAP_EXTENT_DATA_INLINE |
+ FIEMAP_EXTENT_NOT_ALIGNED);
+ break;
+ case EXTENT_MAP_DELALLOC:
+ flags |= (FIEMAP_EXTENT_DELALLOC |
+ FIEMAP_EXTENT_UNKNOWN);
+ break;
+ default:
+ disko = em->block_start;
+ break;
+ }
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ flags |= FIEMAP_EXTENT_ENCODED;
+
+ emflags = em->flags;
+ free_extent_map(em);
+ em = NULL;
+
+ if (!end) {
+ em = get_extent(inode, NULL, 0, off, max - off, 0);
+ if (!em)
+ goto out;
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ emflags = em->flags;
+ }
+ if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+ flags |= FIEMAP_EXTENT_LAST;
+ end = 1;
+ }
+
+ ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ em_len, flags);
+ if (ret)
+ goto out_free;
+ }
+out_free:
+ free_extent_map(em);
+out:
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
+ GFP_NOFS);
+ return ret;
+}
+
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
unsigned long i)
{
@@ -2892,15 +2976,17 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
gfp_t mask)
{
struct extent_buffer *eb = NULL;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
unsigned long flags;
#endif
eb = kmem_cache_zalloc(extent_buffer_cache, mask);
eb->start = start;
eb->len = len;
- mutex_init(&eb->mutex);
-#ifdef LEAK_DEBUG
+ spin_lock_init(&eb->lock);
+ init_waitqueue_head(&eb->lock_wq);
+
+#if LEAK_DEBUG
spin_lock_irqsave(&leak_lock, flags);
list_add(&eb->leak_list, &buffers);
spin_unlock_irqrestore(&leak_lock, flags);
@@ -2912,7 +2998,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
static void __free_extent_buffer(struct extent_buffer *eb)
{
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
unsigned long flags;
spin_lock_irqsave(&leak_lock, flags);
list_del(&eb->leak_list);
@@ -2980,8 +3066,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
unlock_page(p);
}
if (uptodate)
- eb->flags |= EXTENT_UPTODATE;
- eb->flags |= EXTENT_BUFFER_FILLED;
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
spin_lock(&tree->buffer_lock);
exists = buffer_tree_insert(tree, start, &eb->rb_node);
@@ -3135,7 +3220,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
unsigned long num_pages;
num_pages = num_extent_pages(eb->start, eb->len);
- eb->flags &= ~EXTENT_UPTODATE;
+ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
GFP_NOFS);
@@ -3206,7 +3291,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
struct page *page;
int pg_uptodate = 1;
- if (eb->flags & EXTENT_UPTODATE)
+ if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 1;
ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3242,7 +3327,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
struct bio *bio = NULL;
unsigned long bio_flags = 0;
- if (eb->flags & EXTENT_UPTODATE)
+ if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3273,7 +3358,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
}
if (all_uptodate) {
if (start_i == 0)
- eb->flags |= EXTENT_UPTODATE;
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
goto unlock_exit;
}
@@ -3309,7 +3394,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
}
if (!ret)
- eb->flags |= EXTENT_UPTODATE;
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
return ret;
unlock_exit:
@@ -3406,7 +3491,6 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
unmap_extent_buffer(eb, eb->map_token, km);
eb->map_token = NULL;
save = 1;
- WARN_ON(!mutex_is_locked(&eb->mutex));
}
err = map_private_extent_buffer(eb, start, min_len, token, map,
map_start, map_len, km);
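
The new extent_fiemap() above is the engine behind the btrfs_fiemap inode
operation wired up later in this patch; userspace reaches it through the
generic FS_IOC_FIEMAP ioctl. A small demonstration program using only the
standard fiemap ABI from linux/fiemap.h (nothing btrfs-specific is
assumed; extent counts and flags are whatever the filesystem reports):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        struct fiemap *fm;
        unsigned int i;
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        /* header plus room for up to 32 extent records */
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        if (!fm)
                return 1;
        fm->fm_start = 0;
        fm->fm_length = ~0ULL;          /* map the whole file */
        fm->fm_extent_count = 32;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                perror("FS_IOC_FIEMAP");
                return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++)
                printf("logical %llu physical %llu len %llu flags 0x%x\n",
                       (unsigned long long)fm->fm_extents[i].fe_logical,
                       (unsigned long long)fm->fm_extents[i].fe_physical,
                       (unsigned long long)fm->fm_extents[i].fe_length,
                       fm->fm_extents[i].fe_flags);
        free(fm);
        close(fd);
        return 0;
}
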
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index c5b483a7913..1f9df88afbf 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -22,6 +22,10 @@
/* flags for bio submission */
#define EXTENT_BIO_COMPRESSED 1
+/* these are bit numbers for test/set bit */
+#define EXTENT_BUFFER_UPTODATE 0
+#define EXTENT_BUFFER_BLOCKING 1
+
/*
* page->private values. Every page that is controlled by the extent
* map has page->private set to one.
@@ -95,11 +99,19 @@ struct extent_buffer {
unsigned long map_start;
unsigned long map_len;
struct page *first_page;
+ unsigned long bflags;
atomic_t refs;
- int flags;
struct list_head leak_list;
struct rb_node rb_node;
- struct mutex mutex;
+
+ /* the spinlock is used to protect most operations */
+ spinlock_t lock;
+
+ /*
+ * when we keep the lock held while blocking, waiters go onto
+ * the wq
+ */
+ wait_queue_head_t lock_wq;
};
struct extent_map_tree;
@@ -193,6 +205,8 @@ int extent_commit_write(struct extent_io_tree *tree,
unsigned from, unsigned to);
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
get_extent_t *get_extent);
+int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len, get_extent_t *get_extent);
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 4a83e33ada3..50da69da20c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,7 +3,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 90268334145..872f104576e 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -29,7 +29,6 @@
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
-#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -1215,15 +1214,15 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
}
mutex_unlock(&root->fs_info->trans_mutex);
- root->fs_info->tree_log_batch++;
+ root->log_batch++;
filemap_fdatawrite(inode->i_mapping);
btrfs_wait_ordered_range(inode, 0, (u64)-1);
- root->fs_info->tree_log_batch++;
+ root->log_batch++;
/*
* ok we haven't committed the transaction yet, lets do a commit
*/
- if (file->private_data)
+ if (file && file->private_data)
btrfs_ioctl_trans_end(file);
trans = btrfs_start_transaction(root, 1);
@@ -1232,7 +1231,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
goto out;
}
- ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
+ ret = btrfs_log_dentry_safe(trans, root, dentry);
if (ret < 0)
goto out;
@@ -1246,7 +1245,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&dentry->d_inode->i_mutex);
if (ret > 0) {
ret = btrfs_commit_transaction(trans, root);
@@ -1254,7 +1253,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
btrfs_sync_log(trans, root);
ret = btrfs_end_transaction(trans, root);
}
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&dentry->d_inode->i_mutex);
out:
return ret > 0 ? EIO : ret;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 2aa79873eb4..cc7334d833c 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -84,7 +84,6 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
search_key.type = 0;
search_key.offset = 0;
- btrfs_init_path(path);
start_found = 0;
ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
if (ret < 0)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8adfe059ab4..3cee77ae03c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -34,7 +34,6 @@
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
-#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
@@ -51,6 +50,7 @@
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"
+#include "locking.h"
struct btrfs_iget_args {
u64 ino;
@@ -91,6 +91,16 @@ static noinline int cow_file_range(struct inode *inode,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
+static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
+{
+ int err;
+
+ err = btrfs_init_acl(inode, dir);
+ if (!err)
+ err = btrfs_xattr_security_init(inode, dir);
+ return err;
+}
+
/*
* a very lame attempt at stopping writes when the FS is 85% full. There
* are countless ways this is incorrect, but it is better than nothing.
@@ -350,6 +360,19 @@ again:
nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+ /*
+ * we don't want to send crud past the end of i_size through
+ * compression; that's just a waste of CPU time. So, if the
+ * end of the file is before the start of our current
+ * requested range of bytes, we bail out to the uncompressed
+ * cleanup code that can deal with all of this.
+ *
+ * It isn't really the fastest way to fix things, but this is a
+ * very uncommon corner.
+ */
+ if (actual_end <= start)
+ goto cleanup_and_bail_uncompressed;
+
total_compressed = actual_end - start;
/* we want to make sure that amount of ram required to uncompress
@@ -494,6 +517,7 @@ again:
goto again;
}
} else {
+cleanup_and_bail_uncompressed:
/*
* No compression, but we still need to write the pages in
* the file we've been given so far. redirty the locked
@@ -1324,12 +1348,11 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_offset,
struct list_head *list)
{
- struct list_head *cur;
struct btrfs_ordered_sum *sum;
btrfs_set_trans_block_group(trans, inode);
- list_for_each(cur, list) {
- sum = list_entry(cur, struct btrfs_ordered_sum, list);
+
+ list_for_each_entry(sum, list, list) {
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
}
@@ -2013,6 +2036,7 @@ void btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
+
BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
alloc_group_block, 0);
btrfs_free_path(path);
@@ -2039,6 +2063,7 @@ void btrfs_read_locked_inode(struct inode *inode)
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
break;
default:
+ inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
}
@@ -2108,6 +2133,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
goto failed;
}
+ btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
@@ -2429,6 +2455,8 @@ next_node:
ref->generation = leaf_gen;
ref->nritems = 0;
+ btrfs_sort_leaf_ref(ref);
+
ret = btrfs_add_leaf_ref(root, ref, 0);
WARN_ON(ret);
btrfs_free_leaf_ref(root, ref);
@@ -2476,7 +2504,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key found_key;
- u32 found_type;
+ u32 found_type = (u8)-1;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
u64 extent_start = 0;
@@ -2503,8 +2531,6 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
key.offset = (u64)-1;
key.type = (u8)-1;
- btrfs_init_path(path);
-
search_again:
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
@@ -2663,6 +2689,8 @@ next:
if (pending_del_nr)
goto del_pending;
btrfs_release_path(root, path);
+ if (found_type == BTRFS_INODE_ITEM_KEY)
+ break;
goto search_again;
}
@@ -2679,6 +2707,8 @@ del_pending:
BUG_ON(ret);
pending_del_nr = 0;
btrfs_release_path(root, path);
+ if (found_type == BTRFS_INODE_ITEM_KEY)
+ break;
goto search_again;
}
}
@@ -3265,7 +3295,7 @@ skip:
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
- filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
+ filp->f_pos = INT_LIMIT(off_t);
else
filp->f_pos++;
nopos:
@@ -3458,7 +3488,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
root->highest_inode = objectid;
inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+
+ if (dir && (dir->i_mode & S_ISGID)) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else
+ inode->i_gid = current_fsgid();
+
inode->i_mode = mode;
inode->i_ino = objectid;
inode_set_bytes(inode, 0);
@@ -3586,7 +3623,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_inode_security(inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -3649,7 +3686,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_inode_security(inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -3772,7 +3809,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
drop_on_err = 1;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_inode_security(inode, dir);
if (err)
goto out_fail;
@@ -4158,9 +4195,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
return -EINVAL;
}
-static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
+static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
{
- return extent_bmap(mapping, iblock, btrfs_get_extent);
+ return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
}
int btrfs_readpage(struct file *file, struct page *page)
@@ -4223,7 +4261,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
if (PageWriteback(page) || PageDirty(page))
return 0;
- return __btrfs_releasepage(page, gfp_flags);
+ return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
@@ -4733,7 +4771,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_inode_security(inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4987,13 +5025,24 @@ static struct extent_io_ops btrfs_extent_io_ops = {
.clear_bit_hook = btrfs_clear_bit_hook,
};
+/*
+ * btrfs doesn't support the bmap operation because swapfiles
+ * use bmap to make a mapping of extents in the file. They assume
+ * these extents won't change over the life of the file and they
+ * use the bmap result to do IO directly to the drive.
+ *
+ * the btrfs bmap call would return logical addresses that aren't
+ * suitable for IO and they also will change frequently as COW
+ * operations happen. So, swapfile + btrfs == corruption.
+ *
+ * For now we're avoiding this by dropping bmap.
+ */
static struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
.sync_page = block_sync_page,
- .bmap = btrfs_bmap,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
@@ -5017,6 +5066,7 @@ static struct inode_operations btrfs_file_inode_operations = {
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.fallocate = btrfs_fallocate,
+ .fiemap = btrfs_fiemap,
};
static struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
@@ -5032,4 +5082,8 @@ static struct inode_operations btrfs_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.permission = btrfs_permission,
+ .setxattr = btrfs_setxattr,
+ .getxattr = btrfs_getxattr,
+ .listxattr = btrfs_listxattr,
+ .removexattr = btrfs_removexattr,
};
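
The ->bmap removal commented above is observable from userspace: the
legacy FIBMAP ioctl (which requires CAP_SYS_RAWIO) has nothing to call
once an address_space provides no bmap method, so it fails on btrfs while
the FIEMAP interface added earlier in this patch keeps working. A short
probe, assuming only the standard FIBMAP ABI from linux/fs.h:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        int fd;
        int block = 0;  /* in: file block index, out: disk block */

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        /*
         * with no ->bmap in btrfs_aops the kernel refuses the ioctl;
         * filesystems that still provide bmap fill in a block number
         */
        if (ioctl(fd, FIBMAP, &block) < 0)
                perror("FIBMAP");
        else
                printf("file block 0 is on disk block %d\n", block);
        close(fd);
        return 0;
}
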
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c2aa33e3feb..988fdc8b49e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -38,7 +38,6 @@
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
-#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include "compat.h"
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae7761db..85506c4a3af 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -25,64 +25,203 @@
#include "extent_io.h"
#include "locking.h"
+static inline void spin_nested(struct extent_buffer *eb)
+{
+ spin_lock(&eb->lock);
+}
+
/*
- * locks the per buffer mutex in an extent buffer. This uses adaptive locks
- * and the spin is not tuned very extensively. The spinning does make a big
- * difference in almost every workload, but spinning for the right amount of
- * time needs some help.
- *
- * In general, we want to spin as long as the lock holder is doing btree
- * searches, and we should give up if they are in more expensive code.
+ * Setting a lock to blocking will drop the spinlock and set the
+ * flag that forces other procs who want the lock to wait. After
+ * this you can safely schedule with the lock held.
*/
+void btrfs_set_lock_blocking(struct extent_buffer *eb)
+{
+ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+ set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+ spin_unlock(&eb->lock);
+ }
+ /* exit with the spin lock released and the bit set */
+}
-int btrfs_tree_lock(struct extent_buffer *eb)
+/*
+ * clearing the blocking flag will take the spinlock again.
+ * After this you can't safely schedule
+ */
+void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
- int i;
+ if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+ spin_nested(eb);
+ clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+ smp_mb__after_clear_bit();
+ }
+ /* exit with the spin lock held */
+}
- if (mutex_trylock(&eb->mutex))
- return 0;
+/*
+ * unfortunately, many of the places that currently set a lock to blocking
+ * don't end up blocking for very long, and often they don't block
+ * at all. For a dbench 50 run, if we don't spin on the blocking bit
+ * at all, the context switch rate can jump up to 400,000/sec or more.
+ *
+ * So, we're still stuck with this crummy spin on the blocking bit,
+ * at least until the most common causes of the short blocks
+ * can be dealt with.
+ */
+static int btrfs_spin_on_block(struct extent_buffer *eb)
+{
+ int i;
for (i = 0; i < 512; i++) {
cpu_relax();
- if (mutex_trylock(&eb->mutex))
- return 0;
+ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ return 1;
+ if (need_resched())
+ break;
}
- cpu_relax();
- mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
return 0;
}
-int btrfs_try_tree_lock(struct extent_buffer *eb)
+/*
+ * This is somewhat different from trylock. It will take the
+ * spinlock but if it finds the lock is set to blocking, it will
+ * return without the lock held.
+ *
+ * returns 1 if it was able to take the lock and zero otherwise
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_try_spin_lock(struct extent_buffer *eb)
{
- return mutex_trylock(&eb->mutex);
+ int i;
+
+ spin_nested(eb);
+ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ return 1;
+ spin_unlock(&eb->lock);
+
+ /* spin for a bit on the BLOCKING flag */
+ for (i = 0; i < 2; i++) {
+ if (!btrfs_spin_on_block(eb))
+ break;
+
+ spin_nested(eb);
+ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ return 1;
+ spin_unlock(&eb->lock);
+ }
+ return 0;
}
-int btrfs_tree_unlock(struct extent_buffer *eb)
+/*
+ * the autoremove wake function will return 0 if it tried to wake up
+ * a process that was already awake, which means that process won't
+ * count as an exclusive wakeup. The waitq code will continue waking
+ * procs until it finds one that was actually sleeping.
+ *
+ * For btrfs, this isn't quite what we want. We want a single proc
+ * to be notified that the lock is ready for taking. If that proc
+ * already happens to be awake, great, it will loop around and try for
+ * the lock.
+ *
+ * So, btrfs_wake_function always returns 1, even when the proc that we
+ * tried to wake up was already awake.
+ */
+static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
+ int sync, void *key)
{
- mutex_unlock(&eb->mutex);
- return 0;
+ autoremove_wake_function(wait, mode, sync, key);
+ return 1;
}
-int btrfs_tree_locked(struct extent_buffer *eb)
+/*
+ * returns with the extent buffer spinlocked.
+ *
+ * This will spin and/or wait as required to take the lock, and then
+ * return with the spinlock held.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_tree_lock(struct extent_buffer *eb)
{
- return mutex_is_locked(&eb->mutex);
+ DEFINE_WAIT(wait);
+ wait.func = btrfs_wake_function;
+
+ while (1) {
+ spin_nested(eb);
+
+ /* nobody is blocking, exit with the spinlock held */
+ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ return 0;
+
+ /*
+ * we have the spinlock, but the real owner is blocking.
+ * wait for them
+ */
+ spin_unlock(&eb->lock);
+
+ /*
+ * spin for a bit, and if the blocking flag goes away,
+ * loop around
+ */
+ if (btrfs_spin_on_block(eb))
+ continue;
+
+ prepare_to_wait_exclusive(&eb->lock_wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ schedule();
+
+ finish_wait(&eb->lock_wq, &wait);
+ }
+ return 0;
}
/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
+ * Very quick trylock; this does not spin or schedule. It returns
+ * 1 with the spinlock held if it was able to take the lock, or it
+ * returns zero if it was unable to take the lock.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
*/
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
+int btrfs_try_tree_lock(struct extent_buffer *eb)
{
- int i;
- struct extent_buffer *eb;
- for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
- eb = path->nodes[i];
- if (!eb)
- break;
- smp_mb();
- if (!list_empty(&eb->mutex.wait_list))
- return 1;
+ if (spin_trylock(&eb->lock)) {
+ if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+ /*
+ * we've got the spinlock, but the real owner is
+ * blocking. Drop the spinlock and return failure
+ */
+ spin_unlock(&eb->lock);
+ return 0;
+ }
+ return 1;
}
+ /* someone else has the spinlock, give up */
return 0;
}
+int btrfs_tree_unlock(struct extent_buffer *eb)
+{
+ /*
+ * if we were a blocking owner, we don't have the spinlock held;
+ * just clear the bit and look for waiters
+ */
+ if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+ smp_mb__after_clear_bit();
+ else
+ spin_unlock(&eb->lock);
+
+ if (waitqueue_active(&eb->lock_wq))
+ wake_up(&eb->lock_wq);
+ return 0;
+}
+
+int btrfs_tree_locked(struct extent_buffer *eb)
+{
+ return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
+ spin_is_locked(&eb->lock);
+}
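
Taken together, the rewritten primitives form a small state machine: a
spinlock for short critical sections, promoted to a "blocking" bit when
the holder may schedule. A hedged sketch of the caller-side pattern the
comments above describe (the function itself is invented for
illustration; only the btrfs_* entry points come from this patch):

static void example_locked_work(struct extent_buffer *eb)
{
        /* returns with eb->lock held and the blocking bit clear */
        btrfs_tree_lock(eb);

        /* ... short, non-sleeping work on the buffer ... */

        /*
         * about to do something that can schedule (IO, allocation):
         * drop the raw spinlock but keep logical ownership via the
         * EXTENT_BUFFER_BLOCKING bit
         */
        btrfs_set_lock_blocking(eb);

        /* ... slow path, sleeping is now safe ... */

        /* re-take the spinlock before more non-sleeping work */
        btrfs_clear_lock_blocking(eb);

        btrfs_tree_unlock(eb);  /* correct from either state */
}
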
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index bc1faef1251..6bb0afbff92 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -22,6 +22,10 @@
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_tree_locked(struct extent_buffer *eb);
+
int btrfs_try_tree_lock(struct extent_buffer *eb);
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
+int btrfs_try_spin_lock(struct extent_buffer *eb);
+
+void btrfs_set_lock_blocking(struct extent_buffer *eb);
+void btrfs_clear_lock_blocking(struct extent_buffer *eb);
#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a2094017027..77c2411a5f0 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -613,7 +613,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
struct btrfs_sector_sum *sector_sums;
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
- struct list_head *cur;
unsigned long num_sectors;
unsigned long i;
u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
@@ -624,8 +623,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
return 1;
mutex_lock(&tree->mutex);
- list_for_each_prev(cur, &ordered->list) {
- ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
+ list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
if (disk_bytenr >= ordered_sum->bytenr) {
num_sectors = ordered_sum->len / sectorsize;
sector_sums = ordered_sum->sums;
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index 6f0acc4c9ea..d0cc62bccb9 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -17,6 +17,7 @@
*/
#include <linux/sched.h>
+#include <linux/sort.h>
#include "ctree.h"
#include "ref-cache.h"
#include "transaction.h"
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
index 16f3183d7c5..bc283ad2db7 100644
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -73,5 +73,4 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
int shared);
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-
#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index db9fb3bc1e3..19a4daf03cc 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -37,7 +37,6 @@
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/miscdevice.h>
-#include <linux/version.h>
#include <linux/magic.h>
#include "compat.h"
#include "ctree.h"
@@ -380,7 +379,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_start_delalloc_inodes(root);
btrfs_wait_ordered_extents(root, 0);
- btrfs_clean_old_snapshots(root);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
sb->s_dirt = 0;
@@ -512,6 +510,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
struct btrfs_root *root = btrfs_sb(sb);
int ret;
+ ret = btrfs_parse_options(root, data);
+ if (ret)
+ return -EINVAL;
+
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
@@ -583,17 +585,18 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
struct btrfs_ioctl_vol_args *vol;
struct btrfs_fs_devices *fs_devices;
int ret = -ENOTTY;
- int len;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
vol = kmalloc(sizeof(*vol), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
ret = -EFAULT;
goto out;
}
- len = strnlen(vol->name, BTRFS_PATH_NAME_MAX);
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8a08f944334..4112d53d4f4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -688,7 +688,9 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
num_bytes -= btrfs_root_used(&dirty->root->root_item);
bytes_used = btrfs_root_used(&root->root_item);
if (num_bytes) {
+ mutex_lock(&root->fs_info->trans_mutex);
btrfs_record_root_in_trans(root);
+ mutex_unlock(&root->fs_info->trans_mutex);
btrfs_set_root_used(&root->root_item,
bytes_used - num_bytes);
}
@@ -852,11 +854,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
{
struct btrfs_pending_snapshot *pending;
struct list_head *head = &trans->transaction->pending_snapshots;
- struct list_head *cur;
int ret;
- list_for_each(cur, head) {
- pending = list_entry(cur, struct btrfs_pending_snapshot, list);
+ list_for_each_entry(pending, head, list) {
ret = create_pending_snapshot(trans, fs_info, pending);
BUG_ON(ret);
}
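
The snapshot loop above is one instance of a conversion repeated all
through this patch: an open-coded list_for_each() plus list_entry()
collapsed into list_for_each_entry(), which performs the container_of()
step itself. A self-contained kernel-context sketch with placeholder
names showing the equivalence:

struct item {
        int payload;
        struct list_head list;
};

static int sum_items(struct list_head *head)
{
        struct list_head *cur;
        struct item *it;
        int sum = 0;

        /* old style: iterate raw list_heads, convert each by hand */
        list_for_each(cur, head) {
                it = list_entry(cur, struct item, list);
                sum += it->payload;
        }

        /* new style: the iterator hands back the containing struct */
        sum = 0;
        list_for_each_entry(it, head, list)
                sum += it->payload;

        return sum;
}
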
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3e8358c3616..98d25fa4570 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -74,6 +74,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
u32 nritems;
root_node = btrfs_lock_root_node(root);
+ btrfs_set_lock_blocking(root_node);
nritems = btrfs_header_nritems(root_node);
root->defrag_max.objectid = 0;
/* from above we know this is not a leaf */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d81cda2e077..9c462fbd60f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -78,104 +78,6 @@ static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
*/
/*
- * btrfs_add_log_tree adds a new per-subvolume log tree into the
- * tree of log tree roots. This must be called with a tree log transaction
- * running (see start_log_trans).
- */
-static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- struct btrfs_key key;
- struct btrfs_root_item root_item;
- struct btrfs_inode_item *inode_item;
- struct extent_buffer *leaf;
- struct btrfs_root *new_root = root;
- int ret;
- u64 objectid = root->root_key.objectid;
-
- leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
- BTRFS_TREE_LOG_OBJECTID,
- trans->transid, 0, 0, 0);
- if (IS_ERR(leaf)) {
- ret = PTR_ERR(leaf);
- return ret;
- }
-
- btrfs_set_header_nritems(leaf, 0);
- btrfs_set_header_level(leaf, 0);
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
-
- write_extent_buffer(leaf, root->fs_info->fsid,
- (unsigned long)btrfs_header_fsid(leaf),
- BTRFS_FSID_SIZE);
- btrfs_mark_buffer_dirty(leaf);
-
- inode_item = &root_item.inode;
- memset(inode_item, 0, sizeof(*inode_item));
- inode_item->generation = cpu_to_le64(1);
- inode_item->size = cpu_to_le64(3);
- inode_item->nlink = cpu_to_le32(1);
- inode_item->nbytes = cpu_to_le64(root->leafsize);
- inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
-
- btrfs_set_root_bytenr(&root_item, leaf->start);
- btrfs_set_root_generation(&root_item, trans->transid);
- btrfs_set_root_level(&root_item, 0);
- btrfs_set_root_refs(&root_item, 0);
- btrfs_set_root_used(&root_item, 0);
-
- memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
- root_item.drop_level = 0;
-
- btrfs_tree_unlock(leaf);
- free_extent_buffer(leaf);
- leaf = NULL;
-
- btrfs_set_root_dirid(&root_item, 0);
-
- key.objectid = BTRFS_TREE_LOG_OBJECTID;
- key.offset = objectid;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
- ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
- &root_item);
- if (ret)
- goto fail;
-
- new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
- &key);
- BUG_ON(!new_root);
-
- WARN_ON(root->log_root);
- root->log_root = new_root;
-
- /*
- * log trees do not get reference counted because they go away
- * before a real commit is actually done. They do store pointers
- * to file data extents, and those reference counts still get
- * updated (along with back refs to the log tree).
- */
- new_root->ref_cows = 0;
- new_root->last_trans = trans->transid;
-
- /*
- * we need to make sure the root block for this new tree
- * is marked as dirty in the dirty_log_pages tree. This
- * is how it gets flushed down to disk at tree log commit time.
- *
- * the tree logging mutex keeps others from coming in and changing
- * the new_root->node, so we can safely access it here
- */
- set_extent_dirty(&new_root->dirty_log_pages, new_root->node->start,
- new_root->node->start + new_root->node->len - 1,
- GFP_NOFS);
-
-fail:
- return ret;
-}
-
-/*
* start a sub transaction and setup the log tree
* this increments the log tree writer count to make the people
* syncing the tree wait for us to finish
@@ -184,6 +86,14 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
+
+ mutex_lock(&root->log_mutex);
+ if (root->log_root) {
+ root->log_batch++;
+ atomic_inc(&root->log_writers);
+ mutex_unlock(&root->log_mutex);
+ return 0;
+ }
mutex_lock(&root->fs_info->tree_log_mutex);
if (!root->fs_info->log_root_tree) {
ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -193,9 +103,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
ret = btrfs_add_log_tree(trans, root);
BUG_ON(ret);
}
- atomic_inc(&root->fs_info->tree_log_writers);
- root->fs_info->tree_log_batch++;
mutex_unlock(&root->fs_info->tree_log_mutex);
+ root->log_batch++;
+ atomic_inc(&root->log_writers);
+ mutex_unlock(&root->log_mutex);
return 0;
}
@@ -212,13 +123,12 @@ static int join_running_log_trans(struct btrfs_root *root)
if (!root->log_root)
return -ENOENT;
- mutex_lock(&root->fs_info->tree_log_mutex);
+ mutex_lock(&root->log_mutex);
if (root->log_root) {
ret = 0;
- atomic_inc(&root->fs_info->tree_log_writers);
- root->fs_info->tree_log_batch++;
+ atomic_inc(&root->log_writers);
}
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ mutex_unlock(&root->log_mutex);
return ret;
}
@@ -228,10 +138,11 @@ static int join_running_log_trans(struct btrfs_root *root)
*/
static int end_log_trans(struct btrfs_root *root)
{
- atomic_dec(&root->fs_info->tree_log_writers);
- smp_mb();
- if (waitqueue_active(&root->fs_info->tree_log_wait))
- wake_up(&root->fs_info->tree_log_wait);
+ if (atomic_dec_and_test(&root->log_writers)) {
+ smp_mb();
+ if (waitqueue_active(&root->log_writer_wait))
+ wake_up(&root->log_writer_wait);
+ }
return 0;
}
@@ -1704,6 +1615,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
clean_tree_block(trans, root, next);
+ btrfs_set_lock_blocking(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -1750,6 +1662,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
next = path->nodes[*level];
btrfs_tree_lock(next);
clean_tree_block(trans, root, next);
+ btrfs_set_lock_blocking(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -1807,6 +1720,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
clean_tree_block(trans, root, next);
+ btrfs_set_lock_blocking(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -1879,6 +1793,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
clean_tree_block(trans, log, next);
+ btrfs_set_lock_blocking(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -1902,26 +1817,65 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
}
}
btrfs_free_path(path);
- if (wc->free)
- free_extent_buffer(log->node);
return ret;
}
-static int wait_log_commit(struct btrfs_root *log)
+/*
+ * helper function to update the item for a given subvolumes log root
+ * in the tree of log roots
+ */
+static int update_log_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log)
+{
+ int ret;
+
+ if (log->log_transid == 1) {
+ /* insert root item on the first sync */
+ ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
+ &log->root_key, &log->root_item);
+ } else {
+ ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+ &log->root_key, &log->root_item);
+ }
+ return ret;
+}
+
+static int wait_log_commit(struct btrfs_root *root, unsigned long transid)
{
DEFINE_WAIT(wait);
- u64 transid = log->fs_info->tree_log_transid;
+ int index = transid % 2;
+ /*
+ * we only allow two pending log transactions at a time,
+ * so we know that if ours is more than 2 older than the
+ * current transaction, we're done
+ */
do {
- prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&log->fs_info->tree_log_mutex);
- if (atomic_read(&log->fs_info->tree_log_commit))
+ prepare_to_wait(&root->log_commit_wait[index],
+ &wait, TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&root->log_mutex);
+ if (root->log_transid < transid + 2 &&
+ atomic_read(&root->log_commit[index]))
schedule();
- finish_wait(&log->fs_info->tree_log_wait, &wait);
- mutex_lock(&log->fs_info->tree_log_mutex);
- } while (transid == log->fs_info->tree_log_transid &&
- atomic_read(&log->fs_info->tree_log_commit));
+ finish_wait(&root->log_commit_wait[index], &wait);
+ mutex_lock(&root->log_mutex);
+ } while (root->log_transid < transid + 2 &&
+ atomic_read(&root->log_commit[index]));
+ return 0;
+}
+
+static int wait_for_writer(struct btrfs_root *root)
+{
+ DEFINE_WAIT(wait);
+ while (atomic_read(&root->log_writers)) {
+ prepare_to_wait(&root->log_writer_wait,
+ &wait, TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&root->log_mutex);
+ if (atomic_read(&root->log_writers))
+ schedule();
+ mutex_lock(&root->log_mutex);
+ finish_wait(&root->log_writer_wait, &wait);
+ }
return 0;
}
@@ -1933,57 +1887,114 @@ static int wait_log_commit(struct btrfs_root *log)
int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ int index1;
+ int index2;
int ret;
- unsigned long batch;
struct btrfs_root *log = root->log_root;
+ struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
- mutex_lock(&log->fs_info->tree_log_mutex);
- if (atomic_read(&log->fs_info->tree_log_commit)) {
- wait_log_commit(log);
- goto out;
+ mutex_lock(&root->log_mutex);
+ index1 = root->log_transid % 2;
+ if (atomic_read(&root->log_commit[index1])) {
+ wait_log_commit(root, root->log_transid);
+ mutex_unlock(&root->log_mutex);
+ return 0;
}
- atomic_set(&log->fs_info->tree_log_commit, 1);
+ atomic_set(&root->log_commit[index1], 1);
+
+ /* wait for previous tree log sync to complete */
+ if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
+ wait_log_commit(root, root->log_transid - 1);
while (1) {
- batch = log->fs_info->tree_log_batch;
- mutex_unlock(&log->fs_info->tree_log_mutex);
+ unsigned long batch = root->log_batch;
+ mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
- mutex_lock(&log->fs_info->tree_log_mutex);
-
- while (atomic_read(&log->fs_info->tree_log_writers)) {
- DEFINE_WAIT(wait);
- prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&log->fs_info->tree_log_mutex);
- if (atomic_read(&log->fs_info->tree_log_writers))
- schedule();
- mutex_lock(&log->fs_info->tree_log_mutex);
- finish_wait(&log->fs_info->tree_log_wait, &wait);
- }
- if (batch == log->fs_info->tree_log_batch)
+ mutex_lock(&root->log_mutex);
+ wait_for_writer(root);
+ if (batch == root->log_batch)
break;
}
ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
BUG_ON(ret);
- ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree,
- &root->fs_info->log_root_tree->dirty_log_pages);
+
+ btrfs_set_root_bytenr(&log->root_item, log->node->start);
+ btrfs_set_root_generation(&log->root_item, trans->transid);
+ btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
+
+ root->log_batch = 0;
+ root->log_transid++;
+ log->log_transid = root->log_transid;
+ smp_mb();
+ /*
+ * log tree has been flushed to disk, new modifications of
+ * the log will be written to new positions. so it's safe to
+ * allow log writers to go in.
+ */
+ mutex_unlock(&root->log_mutex);
+
+ mutex_lock(&log_root_tree->log_mutex);
+ log_root_tree->log_batch++;
+ atomic_inc(&log_root_tree->log_writers);
+ mutex_unlock(&log_root_tree->log_mutex);
+
+ ret = update_log_root(trans, log);
+ BUG_ON(ret);
+
+ mutex_lock(&log_root_tree->log_mutex);
+ if (atomic_dec_and_test(&log_root_tree->log_writers)) {
+ smp_mb();
+ if (waitqueue_active(&log_root_tree->log_writer_wait))
+ wake_up(&log_root_tree->log_writer_wait);
+ }
+
+ index2 = log_root_tree->log_transid % 2;
+ if (atomic_read(&log_root_tree->log_commit[index2])) {
+ wait_log_commit(log_root_tree, log_root_tree->log_transid);
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out;
+ }
+ atomic_set(&log_root_tree->log_commit[index2], 1);
+
+ if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2]))
+ wait_log_commit(log_root_tree, log_root_tree->log_transid - 1);
+
+ wait_for_writer(log_root_tree);
+
+ ret = btrfs_write_and_wait_marked_extents(log_root_tree,
+ &log_root_tree->dirty_log_pages);
BUG_ON(ret);
btrfs_set_super_log_root(&root->fs_info->super_for_commit,
- log->fs_info->log_root_tree->node->start);
+ log_root_tree->node->start);
btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
- btrfs_header_level(log->fs_info->log_root_tree->node));
+ btrfs_header_level(log_root_tree->node));
+
+ log_root_tree->log_batch = 0;
+ log_root_tree->log_transid++;
+ smp_mb();
+
+ mutex_unlock(&log_root_tree->log_mutex);
+
+ /*
+ * nobody else is going to jump in and write the ctree
+ * super here because the log_commit atomic below is protecting
+ * us. We must be called with a transaction handle pinning
+ * the running transaction open, so a full commit can't hop
+ * in and cause problems either.
+ */
+ write_ctree_super(trans, root->fs_info->tree_root, 2);
- write_ctree_super(trans, log->fs_info->tree_root, 2);
- log->fs_info->tree_log_transid++;
- log->fs_info->tree_log_batch = 0;
- atomic_set(&log->fs_info->tree_log_commit, 0);
+ atomic_set(&log_root_tree->log_commit[index2], 0);
smp_mb();
- if (waitqueue_active(&log->fs_info->tree_log_wait))
- wake_up(&log->fs_info->tree_log_wait);
+ if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
+ wake_up(&log_root_tree->log_commit_wait[index2]);
out:
- mutex_unlock(&log->fs_info->tree_log_mutex);
+ atomic_set(&root->log_commit[index1], 0);
+ smp_mb();
+ if (waitqueue_active(&root->log_commit_wait[index1]))
+ wake_up(&root->log_commit_wait[index1]);
return 0;
}
@@ -2019,38 +2030,18 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
start, end, GFP_NOFS);
}
- log = root->log_root;
- ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
- &log->root_key);
- BUG_ON(ret);
+ if (log->log_transid > 0) {
+ ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
+ &log->root_key);
+ BUG_ON(ret);
+ }
root->log_root = NULL;
- kfree(root->log_root);
+ free_extent_buffer(log->node);
+ kfree(log);
return 0;
}
/*
- * helper function to update the item for a given subvolumes log root
- * in the tree of log roots
- */
-static int update_log_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *log)
-{
- u64 bytenr = btrfs_root_bytenr(&log->root_item);
- int ret;
-
- if (log->node->start == bytenr)
- return 0;
-
- btrfs_set_root_bytenr(&log->root_item, log->node->start);
- btrfs_set_root_generation(&log->root_item, trans->transid);
- btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
- ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
- &log->root_key, &log->root_item);
- BUG_ON(ret);
- return ret;
-}
-
-/*
* If both a file and directory are logged, and unlinks or renames are
* mixed in, we have a few interesting corners:
*
@@ -2711,11 +2702,6 @@ next_slot:
btrfs_free_path(path);
btrfs_free_path(dst_path);
-
- mutex_lock(&root->fs_info->tree_log_mutex);
- ret = update_log_root(trans, log);
- BUG_ON(ret);
- mutex_unlock(&root->fs_info->tree_log_mutex);
out:
return 0;
}
@@ -2846,7 +2832,9 @@ again:
BUG_ON(!wc.replay_dest);
wc.replay_dest->log_root = log;
+ mutex_lock(&fs_info->trans_mutex);
btrfs_record_root_in_trans(wc.replay_dest);
+ mutex_unlock(&fs_info->trans_mutex);
ret = walk_log_tree(trans, log, &wc);
BUG_ON(ret);
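
The tree-log hunks above replace the single fs-wide tree_log_mutex with per-root writer counts, double-buffered log_commit[] flags and wait queues, so several subvolumes can sync their logs in parallel. A minimal userspace sketch of the writer/committer handshake, with pthreads standing in for the kernel's atomics and wait queues (all names here are invented for illustration):

#include <pthread.h>

struct log_sync {
	pthread_mutex_t lock;
	pthread_cond_t  writer_wait;
	int             writers;	/* like atomic_t log_writers */
};

static void writer_enter(struct log_sync *s)
{
	pthread_mutex_lock(&s->lock);
	s->writers++;			/* atomic_inc(&root->log_writers) */
	pthread_mutex_unlock(&s->lock);
}

static void writer_exit(struct log_sync *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->writers == 0)		/* atomic_dec_and_test() + wake_up() */
		pthread_cond_broadcast(&s->writer_wait);
	pthread_mutex_unlock(&s->lock);
}

static void committer_wait_for_writers(struct log_sync *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->writers)		/* wait_for_writer(root) */
		pthread_cond_wait(&s->writer_wait, &s->lock);
	pthread_mutex_unlock(&s->lock);
}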
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3451e1cca2b..1316139bf9e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -20,7 +20,6 @@
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
@@ -104,10 +103,8 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
u64 devid, u8 *uuid)
{
struct btrfs_device *dev;
- struct list_head *cur;
- list_for_each(cur, head) {
- dev = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(dev, head, dev_list) {
if (dev->devid == devid &&
(!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
return dev;
@@ -118,11 +115,9 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
- struct list_head *cur;
struct btrfs_fs_devices *fs_devices;
- list_for_each(cur, &fs_uuids) {
- fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
+ list_for_each_entry(fs_devices, &fs_uuids, list) {
if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
return fs_devices;
}
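
The volumes.c conversions above are purely mechanical: list_for_each() plus list_entry() becomes list_for_each_entry(), which hides the container_of() arithmetic. A standalone sketch of what the entry-style iterator boils down to (hand-rolled here; not the kernel's exact definition):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified entry-style iterator, close in spirit to the kernel's. */
#define list_for_each_entry_sketch(pos, head, member)			\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct device_sketch {
	unsigned long long devid;
	struct list_head dev_list;
};

static struct device_sketch *find_device(struct list_head *head,
					 unsigned long long devid)
{
	struct device_sketch *dev;

	list_for_each_entry_sketch(dev, head, dev_list)
		if (dev->devid == devid)
			return dev;
	return NULL;
}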
@@ -159,6 +154,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
loop:
spin_lock(&device->io_lock);
+loop_lock:
/* take all the bios off the list at once and process them
* later on (without the lock held). But, remember the
* tail and other pointers so the bios can be properly reinserted
@@ -208,7 +204,7 @@ loop:
* is now congested. Back off and let other work structs
* run instead
*/
- if (pending && bdi_write_congested(bdi) &&
+ if (pending && bdi_write_congested(bdi) && num_run > 16 &&
fs_info->fs_devices->open_devices > 1) {
struct bio *old_head;
@@ -220,7 +216,8 @@ loop:
tail->bi_next = old_head;
else
device->pending_bio_tail = tail;
- device->running_pending = 0;
+
+ device->running_pending = 1;
spin_unlock(&device->io_lock);
btrfs_requeue_work(&device->work);
@@ -229,6 +226,11 @@ loop:
}
if (again)
goto loop;
+
+ spin_lock(&device->io_lock);
+ if (device->pending_bios)
+ goto loop_lock;
+ spin_unlock(&device->io_lock);
done:
return 0;
}
@@ -345,14 +347,11 @@ error:
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
- struct list_head *tmp;
- struct list_head *cur;
- struct btrfs_device *device;
+ struct btrfs_device *device, *next;
mutex_lock(&uuid_mutex);
again:
- list_for_each_safe(cur, tmp, &fs_devices->devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->in_fs_metadata)
continue;
@@ -383,14 +382,12 @@ again:
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
- struct list_head *cur;
struct btrfs_device *device;
if (--fs_devices->opened > 0)
return 0;
- list_for_each(cur, &fs_devices->devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev) {
close_bdev_exclusive(device->bdev, device->mode);
fs_devices->open_devices--;
@@ -439,7 +436,6 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
{
struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
- struct list_head *cur;
struct btrfs_device *device;
struct block_device *latest_bdev = NULL;
struct buffer_head *bh;
@@ -450,8 +446,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int seeding = 1;
int ret = 0;
- list_for_each(cur, head) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, head, dev_list) {
if (device->bdev)
continue;
if (!device->name)
@@ -578,7 +573,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
*(unsigned long long *)disk_super->fsid,
*(unsigned long long *)(disk_super->fsid + 8));
}
- printk(KERN_INFO "devid %llu transid %llu %s\n",
+ printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
@@ -1017,14 +1012,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
if (strcmp(device_path, "missing") == 0) {
- struct list_head *cur;
struct list_head *devices;
struct btrfs_device *tmp;
device = NULL;
devices = &root->fs_info->fs_devices->devices;
- list_for_each(cur, devices) {
- tmp = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(tmp, devices, dev_list) {
if (tmp->in_fs_metadata && !tmp->bdev) {
device = tmp;
break;
@@ -1280,7 +1273,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct block_device *bdev;
- struct list_head *cur;
struct list_head *devices;
struct super_block *sb = root->fs_info->sb;
u64 total_bytes;
@@ -1304,8 +1296,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
mutex_lock(&root->fs_info->volume_mutex);
devices = &root->fs_info->fs_devices->devices;
- list_for_each(cur, devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
goto error;
@@ -1704,7 +1695,6 @@ static u64 div_factor(u64 num, int factor)
int btrfs_balance(struct btrfs_root *dev_root)
{
int ret;
- struct list_head *cur;
struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
struct btrfs_device *device;
u64 old_size;
@@ -1723,8 +1713,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
dev_root = dev_root->fs_info->dev_root;
/* step one make some room on all the devices */
- list_for_each(cur, devices) {
- device = list_entry(cur, struct btrfs_device, dev_list);
+ list_for_each_entry(device, devices, dev_list) {
old_size = device->total_bytes;
size_to_free = div_factor(old_size, 1);
size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
@@ -2905,10 +2894,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
free_extent_map(em);
}
- map = kzalloc(sizeof(*map), GFP_NOFS);
- if (!map)
- return -ENOMEM;
-
em = alloc_extent_map(GFP_NOFS);
if (!em)
return -ENOMEM;
@@ -3117,6 +3102,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
if (!sb)
return -ENOMEM;
btrfs_set_buffer_uptodate(sb);
+ btrfs_set_buffer_lockdep_class(sb, 0);
+
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
array_size = btrfs_super_sys_array_size(super_copy);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 7f332e27089..a9d3bf4d268 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/xattr.h>
+#include <linux/security.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
@@ -45,9 +46,12 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
/* lookup the xattr by name */
di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
strlen(name), 0);
- if (!di || IS_ERR(di)) {
+ if (!di) {
ret = -ENODATA;
goto out;
+ } else if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
}
leaf = path->nodes[0];
@@ -62,6 +66,14 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
ret = -ERANGE;
goto out;
}
+
+ /*
+ * The way things are packed into the leaf is like this
+ * |struct btrfs_dir_item|name|data|
+ * where name is the xattr name, so security.foo, and data is the
+ * content of the xattr. data_ptr points to the location in memory
+ * where the data starts in the in memory leaf
+ */
data_ptr = (unsigned long)((char *)(di + 1) +
btrfs_dir_name_len(leaf, di));
read_extent_buffer(leaf, buffer, data_ptr,
@@ -86,7 +98,7 @@ int __btrfs_setxattr(struct inode *inode, const char *name,
if (!path)
return -ENOMEM;
- trans = btrfs_start_transaction(root, 1);
+ trans = btrfs_join_transaction(root, 1);
btrfs_set_trans_block_group(trans, inode);
/* first lets see if we already have this xattr */
@@ -176,7 +188,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- ret = 0;
advance = 0;
while (1) {
leaf = path->nodes[0];
@@ -320,3 +331,34 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
return -EOPNOTSUPP;
return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}
+
+int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
+{
+ int err;
+ size_t len;
+ void *value;
+ char *suffix;
+ char *name;
+
+ err = security_inode_init_security(inode, dir, &suffix, &value, &len);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
+ GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ } else {
+ strcpy(name, XATTR_SECURITY_PREFIX);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
+ err = __btrfs_setxattr(inode, name, value, len, 0);
+ kfree(name);
+ }
+
+ kfree(suffix);
+ kfree(value);
+ return err;
+}
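
btrfs_xattr_security_init() above glues the LSM-provided suffix onto the "security." namespace before storing it through the normal setxattr path. The name assembly in isolation (XATTR_SECURITY_PREFIX really is "security." in the kernel headers; the helper name is invented):

#include <stdlib.h>
#include <string.h>

#define XATTR_SECURITY_PREFIX     "security."
#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)

static char *security_xattr_name(const char *suffix)
{
	char *name = malloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1);

	if (!name)
		return NULL;
	strcpy(name, XATTR_SECURITY_PREFIX);
	strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
	return name;	/* e.g. "security.selinux" for suffix "selinux" */
}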
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 5b1d08f8e68..c71e9c3cf3f 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -36,4 +36,6 @@ extern int btrfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int btrfs_removexattr(struct dentry *dentry, const char *name);
+extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
+
#endif /* __XATTR__ */
diff --git a/fs/buffer.c b/fs/buffer.c
index b58208f1640..9f697419ed8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -777,6 +777,7 @@ static int __set_page_dirty(struct page *page,
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_bdi_stat(mapping->backing_dev_info,
BDI_RECLAIMABLE);
+ task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
radix_tree_tag_set(&mapping->page_tree,
@@ -2688,7 +2689,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
struct buffer_head *bh;
BUG_ON(fsdata != NULL && page_has_buffers(page));
- if (unlikely(copied < len) && !page_has_buffers(page))
+ if (unlikely(copied < len) && head)
attach_nobh_buffers(page, head);
if (page_has_buffers(page))
return generic_write_end(file, mapping, pos, len,
@@ -3108,7 +3109,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(WRITE_SYNC, bh);
+ ret = submit_bh(WRITE, bh);
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
diff --git a/fs/compat.c b/fs/compat.c
index 65a070e705a..d0145ca2757 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1407,7 +1407,7 @@ int compat_do_execve(char * filename,
bprm->cred = prepare_exec_creds();
if (!bprm->cred)
goto out_unlock;
- check_unsafe_exec(bprm);
+ check_unsafe_exec(bprm, current->files);
file = open_exec(filename);
retval = PTR_ERR(file);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c8f8d5904f5..39bd4d38e88 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -785,7 +785,7 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
if (copy_in_user(&sgio->status, &sgio32->status,
(4 * sizeof(unsigned char)) +
- (2 * sizeof(unsigned (short))) +
+ (2 * sizeof(unsigned short)) +
(3 * sizeof(int))))
return -EFAULT;
@@ -1938,6 +1938,8 @@ ULONG_IOCTL(SET_BITMAP_FILE)
/* Big K */
COMPATIBLE_IOCTL(PIO_FONT)
COMPATIBLE_IOCTL(GIO_FONT)
+COMPATIBLE_IOCTL(PIO_CMAP)
+COMPATIBLE_IOCTL(GIO_CMAP)
ULONG_IOCTL(KDSIGACCEPT)
COMPATIBLE_IOCTL(KDGETKEYCODE)
COMPATIBLE_IOCTL(KDSETKEYCODE)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c01e043670e..f6caeb1d110 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1716,7 +1716,7 @@ static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
{
int rc = 0;
- (*copied_name) = kmalloc((name_size + 2), GFP_KERNEL);
+ (*copied_name) = kmalloc((name_size + 1), GFP_KERNEL);
if (!(*copied_name)) {
rc = -ENOMEM;
goto out;
@@ -1726,7 +1726,7 @@ static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
* in printing out the
* string in debug
* messages */
- (*copied_name_size) = (name_size + 1);
+ (*copied_name_size) = name_size;
out:
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
index 0dd60a01f1b..929b58004b7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1049,16 +1049,32 @@ EXPORT_SYMBOL(install_exec_creds);
* - the caller must hold current->cred_exec_mutex to protect against
* PTRACE_ATTACH
*/
-void check_unsafe_exec(struct linux_binprm *bprm)
+void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files)
{
- struct task_struct *p = current;
+ struct task_struct *p = current, *t;
+ unsigned long flags;
+ unsigned n_fs, n_files, n_sighand;
bprm->unsafe = tracehook_unsafe_exec(p);
- if (atomic_read(&p->fs->count) > 1 ||
- atomic_read(&p->files->count) > 1 ||
- atomic_read(&p->sighand->count) > 1)
+ n_fs = 1;
+ n_files = 1;
+ n_sighand = 1;
+ lock_task_sighand(p, &flags);
+ for (t = next_thread(p); t != p; t = next_thread(t)) {
+ if (t->fs == p->fs)
+ n_fs++;
+ if (t->files == files)
+ n_files++;
+ n_sighand++;
+ }
+
+ if (atomic_read(&p->fs->count) > n_fs ||
+ atomic_read(&p->files->count) > n_files ||
+ atomic_read(&p->sighand->count) > n_sighand)
bprm->unsafe |= LSM_UNSAFE_SHARE;
+
+ unlock_task_sighand(p, &flags);
}
/*
@@ -1273,7 +1289,7 @@ int do_execve(char * filename,
bprm->cred = prepare_exec_creds();
if (!bprm->cred)
goto out_unlock;
- check_unsafe_exec(bprm);
+ check_unsafe_exec(bprm, displaced);
file = open_exec(filename);
retval = PTR_ERR(file);
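
The reworked check_unsafe_exec() above no longer treats any refcount above one as unsafe; it counts how many of the caller's own threads share each structure and only flags LSM_UNSAFE_SHARE when someone outside the thread group also holds a reference. A compilable model of that counting (types reduced to the bare minimum, names invented):

struct shared { int refcount; };

struct task_sketch {
	struct shared *fs, *files;
	struct task_sketch *next_thread;	/* circular list, as in the kernel */
};

static int exec_is_unsafe(struct task_sketch *p)
{
	int n_fs = 1, n_files = 1;
	struct task_sketch *t;

	for (t = p->next_thread; t != p; t = t->next_thread) {
		n_fs    += (t->fs == p->fs);
		n_files += (t->files == p->files);
	}
	/* a higher refcount means a user outside our thread group */
	return p->fs->refcount > n_fs || p->files->refcount > n_files;
}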
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index da8bdeaa2e6..7c6e3606f0e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1185,9 +1185,12 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
es = sbi->s_es;
if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
(old_mount_opt & EXT2_MOUNT_XIP)) &&
- invalidate_inodes(sb))
- ext2_warning(sb, __func__, "busy inodes while remounting "\
- "xip remain in cache (no functional problem)");
+ invalidate_inodes(sb)) {
+ ext2_warning(sb, __func__, "refusing change of xip flag "
+ "with busy inodes while remounting");
+ sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
+ sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
+ }
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
if (*flags & MS_RDONLY) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b70d90e08a3..4a970411a45 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2428,12 +2428,13 @@ static void ext3_write_super (struct super_block * sb)
static int ext3_sync_fs(struct super_block *sb, int wait)
{
- sb->s_dirt = 0;
- if (wait)
- ext3_force_commit(sb);
- else
- journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
+ tid_t target;
+ sb->s_dirt = 0;
+ if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
+ if (wait)
+ log_wait_commit(EXT3_SB(sb)->s_journal, target);
+ }
return 0;
}
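
The new ext3_sync_fs() (and the matching ext4 hunk further down) leans on journal_start_commit() now returning true whenever a commit is scheduled or already running, and only blocks on that tid when wait is set. The control flow in miniature (a busy-wait stands in for log_wait_commit(); all names invented):

typedef unsigned int tid_t;

struct journal_sketch {
	tid_t committed;	/* highest tid fully on disk */
	tid_t scheduled;	/* tid scheduled to commit, 0 if none */
};

/* Returns nonzero iff a commit is scheduled or running, filling *ptid. */
static int start_commit(struct journal_sketch *j, tid_t *ptid)
{
	if (!j->scheduled)
		return 0;
	if (ptid)
		*ptid = j->scheduled;
	return 1;
}

static void sync_fs_sketch(struct journal_sketch *j, int wait)
{
	tid_t target;

	if (start_commit(j, &target) && wait)
		while (j->committed < target)
			;	/* log_wait_commit() analogue (busy-wait) */
}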
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index aafc9eba1c2..b0c87dce66a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -868,7 +868,7 @@ static inline unsigned ext4_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
- if (len == EXT4_MAX_REC_LEN)
+ if (len == EXT4_MAX_REC_LEN || len == 0)
return 1 << 16;
return len;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 03ba20be132..cbd2ca99d11 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -47,8 +47,10 @@
static inline int ext4_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
- return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
- new_size);
+ return jbd2_journal_begin_ordered_truncate(
+ EXT4_SB(inode->i_sb)->s_journal,
+ &EXT4_I(inode)->jinode,
+ new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -2437,6 +2439,7 @@ static int ext4_da_writepages(struct address_space *mapping,
int no_nrwrite_index_update;
int pages_written = 0;
long pages_skipped;
+ int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0, nr_to_writebump = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
@@ -2488,9 +2491,15 @@ static int ext4_da_writepages(struct address_space *mapping,
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- if (wbc->range_cyclic)
+ range_cyclic = wbc->range_cyclic;
+ if (wbc->range_cyclic) {
index = mapping->writeback_index;
- else
+ if (index)
+ cycled = 0;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = LLONG_MAX;
+ wbc->range_cyclic = 0;
+ } else
index = wbc->range_start >> PAGE_CACHE_SHIFT;
mpd.wbc = wbc;
@@ -2504,6 +2513,7 @@ static int ext4_da_writepages(struct address_space *mapping,
wbc->no_nrwrite_index_update = 1;
pages_skipped = wbc->pages_skipped;
+retry:
while (!ret && wbc->nr_to_write > 0) {
/*
@@ -2546,6 +2556,7 @@ static int ext4_da_writepages(struct address_space *mapping,
pages_written += mpd.pages_written;
wbc->pages_skipped = pages_skipped;
ret = 0;
+ io_done = 1;
} else if (wbc->nr_to_write)
/*
* There is no more writeout needed
@@ -2554,6 +2565,13 @@ static int ext4_da_writepages(struct address_space *mapping,
*/
break;
}
+ if (!io_done && !cycled) {
+ cycled = 1;
+ index = 0;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = mapping->writeback_index - 1;
+ goto retry;
+ }
if (pages_skipped != wbc->pages_skipped)
printk(KERN_EMERG "This should not happen leaving %s "
"with nr_to_write = %ld ret = %d\n",
@@ -2561,6 +2579,7 @@ static int ext4_da_writepages(struct address_space *mapping,
/* Update index */
index += pages_written;
+ wbc->range_cyclic = range_cyclic;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
* set the writeback_index so that range_cyclic
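
The range_cyclic hunks above convert one wrap-around pass into two linear passes: first from writeback_index to the end of the range, then, if nothing got written, a retry from zero up to where the first pass started. Sketched as plain control flow (names invented):

static long write_range(long start, long end)
{
	(void)start; (void)end;
	return 0;	/* mpage_da_writepages() analogue; "wrote nothing" */
}

static void writepages_cycled(long writeback_index, long range_end)
{
	long written = write_range(writeback_index, range_end);	/* pass 1 */

	if (!written && writeback_index > 0)
		write_range(0, writeback_index - 1);	/* pass 2: wrap to start */
}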
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index deba54f6cbe..4415beeb0b6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3693,6 +3693,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_free = pa->pa_len;
atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
+ INIT_LIST_HEAD(&pa->pa_inode_list);
+ INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_linear = 0;
@@ -3755,6 +3757,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
INIT_LIST_HEAD(&pa->pa_inode_list);
+ INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_linear = 1;
@@ -4476,23 +4479,26 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
pa->pa_free -= ac->ac_b_ex.fe_len;
pa->pa_len -= ac->ac_b_ex.fe_len;
spin_unlock(&pa->pa_lock);
- /*
- * We want to add the pa to the right bucket.
- * Remove it from the list and while adding
- * make sure the list to which we are adding
- * doesn't grow big.
- */
- if (likely(pa->pa_free)) {
- spin_lock(pa->pa_obj_lock);
- list_del_rcu(&pa->pa_inode_list);
- spin_unlock(pa->pa_obj_lock);
- ext4_mb_add_n_trim(ac);
- }
}
- ext4_mb_put_pa(ac, ac->ac_sb, pa);
}
if (ac->alloc_semp)
up_read(ac->alloc_semp);
+ if (pa) {
+ /*
+ * We want to add the pa to the right bucket.
+ * Remove it from the list and while adding
+ * make sure the list to which we are adding
+ * doesn't grow big. We need to release
+ * alloc_semp before calling ext4_mb_add_n_trim()
+ */
+ if (pa->pa_linear && likely(pa->pa_free)) {
+ spin_lock(pa->pa_obj_lock);
+ list_del_rcu(&pa->pa_inode_list);
+ spin_unlock(pa->pa_obj_lock);
+ ext4_mb_add_n_trim(ac);
+ }
+ ext4_mb_put_pa(ac, ac->ac_sb, pa);
+ }
if (ac->ac_bitmap_page)
page_cache_release(ac->ac_bitmap_page);
if (ac->ac_buddy_page)
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 734abca25e3..fe64d9f7985 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -481,7 +481,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 1);
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- goto err_out;
+ return retval;
}
tmp_inode = ext4_new_inode(handle,
inode->i_sb->s_root->d_inode,
@@ -489,8 +489,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(tmp_inode)) {
retval = -ENOMEM;
ext4_journal_stop(handle);
- tmp_inode = NULL;
- goto err_out;
+ return retval;
}
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -618,8 +617,7 @@ err_out:
ext4_journal_stop(handle);
- if (tmp_inode)
- iput(tmp_inode);
+ iput(tmp_inode);
return retval;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e5f06a5f045..a5732c58f67 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3046,14 +3046,17 @@ static void ext4_write_super(struct super_block *sb)
static int ext4_sync_fs(struct super_block *sb, int wait)
{
int ret = 0;
+ tid_t target;
trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
sb->s_dirt = 0;
if (EXT4_SB(sb)->s_journal) {
- if (wait)
- ret = ext4_force_commit(sb);
- else
- jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+ if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal,
+ &target)) {
+ if (wait)
+ jbd2_log_wait_commit(EXT4_SB(sb)->s_journal,
+ target);
+ }
} else {
ext4_commit_super(sb, EXT4_SB(sb)->s_es, wait);
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6903d37af03..9b800d97a68 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,7 +108,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
- len >> huge_page_shift(h), vma))
+ len >> huge_page_shift(h), vma,
+ vma->vm_flags))
goto out;
ret = 0;
@@ -947,7 +948,7 @@ static int can_do_hugetlb_shm(void)
can_do_mlock());
}
-struct file *hugetlb_file_setup(const char *name, size_t size)
+struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
{
int error = -ENOMEM;
struct file *file;
@@ -981,7 +982,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size)
error = -ENOMEM;
if (hugetlb_reserve_pages(inode, 0,
- size >> huge_page_shift(hstate_inode(inode)), NULL))
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
goto out_inode;
d_instantiate(dentry, inode);
diff --git a/fs/internal.h b/fs/internal.h
index 53af885f173..0d8ac497b3d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,7 +43,7 @@ extern void __init chrdev_init(void);
/*
* exec.c
*/
-extern void check_unsafe_exec(struct linux_binprm *);
+extern void check_unsafe_exec(struct linux_binprm *, struct files_struct *);
/*
* namespace.c
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 9e4fa52d7dc..e79c07812af 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -427,7 +427,7 @@ int __log_space_left(journal_t *journal)
}
/*
- * Called under j_state_lock. Returns true if a transaction was started.
+ * Called under j_state_lock. Returns true if a transaction commit was started.
*/
int __log_start_commit(journal_t *journal, tid_t target)
{
@@ -495,7 +495,8 @@ int journal_force_commit_nested(journal_t *journal)
/*
* Start a commit of the current running transaction (if any). Returns true
- * if a transaction was started, and fills its tid in at *ptid
+ * if a transaction is going to be committed (or is currently already
+ * committing), and fills its tid in at *ptid
*/
int journal_start_commit(journal_t *journal, tid_t *ptid)
{
@@ -505,15 +506,19 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
- ret = __log_start_commit(journal, tid);
- if (ret && ptid)
+ __log_start_commit(journal, tid);
+ /* There's a running transaction and we've just made sure
+ * its commit has been scheduled. */
+ if (ptid)
*ptid = tid;
- } else if (journal->j_committing_transaction && ptid) {
+ ret = 1;
+ } else if (journal->j_committing_transaction) {
/*
* If ext3_write_super() recently started a commit, then we
* have to wait for completion of that transaction
*/
- *ptid = journal->j_committing_transaction->t_tid;
+ if (ptid)
+ *ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
spin_unlock(&journal->j_state_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index eb343008ede..58144102bf2 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -450,7 +450,7 @@ int __jbd2_log_space_left(journal_t *journal)
}
/*
- * Called under j_state_lock. Returns true if a transaction was started.
+ * Called under j_state_lock. Returns true if a transaction commit was started.
*/
int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
@@ -518,7 +518,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
/*
* Start a commit of the current running transaction (if any). Returns true
- * if a transaction was started, and fills its tid in at *ptid
+ * if a transaction is going to be committed (or is currently already
+ * committing), and fills its tid in at *ptid
*/
int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
{
@@ -528,15 +529,19 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
- ret = __jbd2_log_start_commit(journal, tid);
- if (ret && ptid)
+ __jbd2_log_start_commit(journal, tid);
+ /* There's a running transaction and we've just made sure
+ * its commit has been scheduled. */
+ if (ptid)
*ptid = tid;
- } else if (journal->j_committing_transaction && ptid) {
+ ret = 1;
+ } else if (journal->j_committing_transaction) {
/*
* If ext3_write_super() recently started a commit, then we
* have to wait for completion of that transaction
*/
- *ptid = journal->j_committing_transaction->t_tid;
+ if (ptid)
+ *ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
spin_unlock(&journal->j_state_lock);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 46b4e347ed7..28ce21d8598 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2129,26 +2129,46 @@ done:
}
/*
- * This function must be called when inode is journaled in ordered mode
- * before truncation happens. It starts writeout of truncated part in
- * case it is in the committing transaction so that we stand to ordered
- * mode consistency guarantees.
+ * File truncate and transaction commit interact with each other in a
+ * non-trivial way. If a transaction writing data block A is
+ * committing, we cannot discard the data by truncate until we have
+ * written them. Otherwise if we crashed after the transaction with
+ * write has committed but before the transaction with truncate has
+ * committed, we could see stale data in block A. This function is a
+ * helper to solve this problem. It starts writeout of the truncated
+ * part in case it is in the committing transaction.
+ *
+ * Filesystem code must call this function when inode is journaled in
+ * ordered mode before truncation happens and after the inode has been
+ * placed on the orphan list with the new inode size. The second condition
+ * avoids the race that someone writes new data and we start
+ * committing the transaction after this function has been called but
+ * before a transaction for truncate is started (and furthermore it
+ * allows us to optimize the case where the addition to orphan list
+ * happens in the same transaction as the write --- we don't have to write
+ * any data in such a case).
*/
-int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode,
+int jbd2_journal_begin_ordered_truncate(journal_t *journal,
+ struct jbd2_inode *jinode,
loff_t new_size)
{
- journal_t *journal;
- transaction_t *commit_trans;
+ transaction_t *inode_trans, *commit_trans;
int ret = 0;
- if (!inode->i_transaction && !inode->i_next_transaction)
+ /* This is a quick check to avoid locking if not necessary */
+ if (!jinode->i_transaction)
goto out;
- journal = inode->i_transaction->t_journal;
+ /* Locks are here just to force reading of recent values; it is
+ * enough that the transaction was not committing before we started
+ * a transaction adding the inode to the orphan list */
spin_lock(&journal->j_state_lock);
commit_trans = journal->j_committing_transaction;
spin_unlock(&journal->j_state_lock);
- if (inode->i_transaction == commit_trans) {
- ret = filemap_fdatawrite_range(inode->i_vfs_inode->i_mapping,
+ spin_lock(&journal->j_list_lock);
+ inode_trans = jinode->i_transaction;
+ spin_unlock(&journal->j_list_lock);
+ if (inode_trans == commit_trans) {
+ ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
new_size, LLONG_MAX);
if (ret)
jbd2_journal_abort(journal, ret);
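
The rewritten helper above reduces to one decision: if the inode's data still belongs to the committing transaction, push the bytes past the new size to disk before truncate may discard them. The decision in isolation (structures pared down, names invented):

struct txn_sketch { int id; };

struct jinode_sketch {
	struct txn_sketch *transaction;	/* txn the inode's data belongs to */
};

static int writeback_tail(struct jinode_sketch *ji, long long new_size)
{
	(void)ji; (void)new_size;
	return 0;	/* filemap_fdatawrite_range() analogue */
}

static int begin_ordered_truncate_sketch(struct jinode_sketch *ji,
					 struct txn_sketch *committing,
					 long long new_size)
{
	if (!ji->transaction || ji->transaction != committing)
		return 0;	/* data not under commit: nothing to order */
	return writeback_tail(ji, new_size);
}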
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 6063a8e4b9f..763b78a6e9d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -427,7 +427,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
case -EAGAIN:
ret = nlm_lck_denied;
- goto out;
+ break;
case FILE_LOCK_DEFERRED:
if (wait)
break;
@@ -443,6 +443,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
+ ret = nlm_lck_denied;
+ if (!wait)
+ goto out;
+
ret = nlm_lck_blocked;
/* Append to list of blocked */
diff --git a/fs/namespace.c b/fs/namespace.c
index 228d8c4bfd1..06f8e63f6cb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -614,9 +614,11 @@ static inline void __mntput(struct vfsmount *mnt)
*/
for_each_possible_cpu(cpu) {
struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
- if (cpu_writer->mnt != mnt)
- continue;
spin_lock(&cpu_writer->lock);
+ if (cpu_writer->mnt != mnt) {
+ spin_unlock(&cpu_writer->lock);
+ continue;
+ }
atomic_add(cpu_writer->count, &mnt->__mnt_writers);
cpu_writer->count = 0;
/*
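
The __mntput() fix above is the classic ordering bug: the old code compared cpu_writer->mnt before taking cpu_writer->lock, so a concurrent update could slip in between the test and the use. The rule, reduced to a compilable shape (a pthread mutex standing in for the spinlock):

#include <pthread.h>

struct writer_slot {
	pthread_mutex_t lock;
	void *mnt;
	int count;
};

static void drain_slot(struct writer_slot *w, void *mnt, int *total)
{
	pthread_mutex_lock(&w->lock);		/* take the lock FIRST... */
	if (w->mnt != mnt) {			/* ...then test the condition */
		pthread_mutex_unlock(&w->lock);
		return;
	}
	*total += w->count;
	w->count = 0;
	pthread_mutex_unlock(&w->lock);
}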
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index dae3f28f30d..331f2e88e28 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -156,7 +156,7 @@ static int inotify_handle_get_wd(struct inotify_handle *ih,
int ret;
do {
- if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
+ if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
return -ENOSPC;
ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
} while (ret == -EAGAIN);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d861096c9d8..60fe74035db 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5390,6 +5390,9 @@ int ocfs2_remove_btree_range(struct inode *inode,
goto out;
}
+ vfs_dq_free_space_nodirty(inode,
+ ocfs2_clusters_to_bytes(inode->i_sb, len));
+
ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac,
dealloc);
if (ret) {
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index b1cc7c381e8..e9d7c2038c0 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -38,6 +38,7 @@
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
+#include "super.h"
static int ocfs2_dentry_revalidate(struct dentry *dentry,
@@ -294,6 +295,34 @@ out_attach:
return ret;
}
+static DEFINE_SPINLOCK(dentry_list_lock);
+
+/* We limit the number of dentry locks to drop in one go. We have
+ * this limit so that we don't starve other users of ocfs2_wq. */
+#define DL_INODE_DROP_COUNT 64
+
+/* Drop inode references from dentry locks */
+void ocfs2_drop_dl_inodes(struct work_struct *work)
+{
+ struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+ dentry_lock_work);
+ struct ocfs2_dentry_lock *dl;
+ int drop_count = DL_INODE_DROP_COUNT;
+
+ spin_lock(&dentry_list_lock);
+ while (osb->dentry_lock_list && drop_count--) {
+ dl = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl->dl_next;
+ spin_unlock(&dentry_list_lock);
+ iput(dl->dl_inode);
+ kfree(dl);
+ spin_lock(&dentry_list_lock);
+ }
+ if (osb->dentry_lock_list)
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ spin_unlock(&dentry_list_lock);
+}
+
/*
* ocfs2_dentry_iput() and friends.
*
@@ -318,16 +347,23 @@ out_attach:
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
- iput(dl->dl_inode);
ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
ocfs2_lock_res_free(&dl->dl_lockres);
- kfree(dl);
+
+ /* We leave dropping of the inode reference to ocfs2_wq as that can
+ * possibly lead to inode deletion, which gets tricky */
+ spin_lock(&dentry_list_lock);
+ if (!osb->dentry_lock_list)
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ dl->dl_next = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl;
+ spin_unlock(&dentry_list_lock);
}
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
- int unlock = 0;
+ int unlock;
BUG_ON(dl->dl_count == 0);
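
The dcache.c hunks above stop calling iput() directly from the lock-drop path (where inode deletion would be awkward) and instead push each lock onto a singly linked list that a workqueue drains, at most DL_INODE_DROP_COUNT entries per pass, requeueing itself if more remain. The producer/worker shape in userspace terms (pthreads instead of a spinlock and ocfs2_wq; names invented):

#include <pthread.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *pending;

static void defer_release(struct deferred *d)	/* producer side */
{
	pthread_mutex_lock(&list_lock);
	d->next = pending;
	pending = d;
	pthread_mutex_unlock(&list_lock);
}

static int drain_some(int budget)		/* worker side */
{
	int more;

	pthread_mutex_lock(&list_lock);
	while (pending && budget--) {
		struct deferred *d = pending;

		pending = d->next;
		pthread_mutex_unlock(&list_lock);	/* drop lock around the slow part */
		free(d);				/* iput()/kfree() analogue */
		pthread_mutex_lock(&list_lock);
	}
	more = pending != NULL;		/* nonzero: requeue the work item */
	pthread_mutex_unlock(&list_lock);
	return more;
}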
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index c091c34d988..d06e16c0664 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -29,8 +29,13 @@
extern struct dentry_operations ocfs2_dentry_ops;
struct ocfs2_dentry_lock {
+ /* Use count of dentry lock */
unsigned int dl_count;
- u64 dl_parent_blkno;
+ union {
+ /* Linked list of dentry locks to release */
+ struct ocfs2_dentry_lock *dl_next;
+ u64 dl_parent_blkno;
+ };
/*
* The ocfs2_dentry_lock keeps an inode reference until
@@ -47,6 +52,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl);
+void ocfs2_drop_dl_inodes(struct work_struct *work);
+
struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
int skip_unhashed);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index b0c4cadd4c4..206a2370876 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2860,6 +2860,10 @@ static void ocfs2_unlock_ast(void *opaque, int error)
case OCFS2_UNLOCK_CANCEL_CONVERT:
mlog(0, "Cancel convert success for %s\n", lockres->l_name);
lockres->l_action = OCFS2_AST_INVALID;
+ /* The downconvert thread may have requeued this lock, so we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
break;
case OCFS2_UNLOCK_DROP_LOCK:
lockres->l_level = DLM_LOCK_IV;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3c3532e1307..172850a9a12 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -513,8 +513,10 @@ static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode)
static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
- return jbd2_journal_begin_ordered_truncate(&OCFS2_I(inode)->ip_jinode,
- new_size);
+ return jbd2_journal_begin_ordered_truncate(
+ OCFS2_SB(inode->i_sb)->journal->j_journal,
+ &OCFS2_I(inode)->ip_jinode,
+ new_size);
}
#endif /* OCFS2_JOURNAL_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index ad5c24a29ed..077384135f4 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -210,6 +210,7 @@ struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
struct ocfs2_quota_recovery;
+struct ocfs2_dentry_lock;
struct ocfs2_super
{
struct task_struct *commit_task;
@@ -325,6 +326,11 @@ struct ocfs2_super
struct list_head blocked_lock_list;
unsigned long blocked_lock_count;
+ /* List of dentry locks to release. Anyone can add locks to
+ * the list, ocfs2_wq processes the list */
+ struct ocfs2_dentry_lock *dentry_lock_list;
+ struct work_struct dentry_lock_work;
+
wait_queue_head_t osb_mount_event;
/* Truncate log info */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index f4efa89baee..1ed0f7c8686 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -754,7 +754,9 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
if (dquot->dq_flags & mask)
sync = 1;
spin_unlock(&dq_data_lock);
- if (!sync) {
+ /* This is a slight hack but we can't afford taking the global quota
+ * lock if we already have a transaction started. */
+ if (!sync || journal_current_handle()) {
status = ocfs2_write_dquot(dquot);
goto out;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 43ed11345b5..b1cb38fbe80 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1887,6 +1887,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
+ INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes);
+ osb->dentry_lock_list = NULL;
+
/* get some pseudo constants for clustersize bits */
osb->s_clustersize_bits =
le32_to_cpu(di->id2.i_super.s_clustersize_bits);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index e1d638af6ac..915039fffe6 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -4729,13 +4729,6 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
vb.vb_xv = (struct ocfs2_xattr_value_root *)
(vb.vb_bh->b_data + offset % blocksize);
- ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
/*
* From here on out we have to dirty the bucket. The generic
* value calls only modify one of the bucket's bhs, but we need
@@ -4748,12 +4741,18 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
if (ret) {
mlog_errno(ret);
- goto out_dirty;
+ goto out;
+ }
+
+ ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
xe->xe_value_size = cpu_to_le64(len);
-out_dirty:
ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
out:
diff --git a/fs/seq_file.c b/fs/seq_file.c
index b569ff1c4dc..a1a4cfe1921 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -48,12 +48,78 @@ int seq_open(struct file *file, const struct seq_operations *op)
*/
file->f_version = 0;
- /* SEQ files support lseek, but not pread/pwrite */
- file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+ /*
+ * seq_files support lseek() and pread(). They do not implement
+ * write() at all, but we clear FMODE_PWRITE here for historical
+ * reasons.
+ *
+ * If a client of seq_files a) implements file.write() and b) wishes to
+ * support pwrite() then that client will need to implement its own
+ * file.open() which calls seq_open() and then sets FMODE_PWRITE.
+ */
+ file->f_mode &= ~FMODE_PWRITE;
return 0;
}
EXPORT_SYMBOL(seq_open);
+static int traverse(struct seq_file *m, loff_t offset)
+{
+ loff_t pos = 0, index;
+ int error = 0;
+ void *p;
+
+ m->version = 0;
+ index = 0;
+ m->count = m->from = 0;
+ if (!offset) {
+ m->index = index;
+ return 0;
+ }
+ if (!m->buf) {
+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+ if (!m->buf)
+ return -ENOMEM;
+ }
+ p = m->op->start(m, &index);
+ while (p) {
+ error = PTR_ERR(p);
+ if (IS_ERR(p))
+ break;
+ error = m->op->show(m, p);
+ if (error < 0)
+ break;
+ if (unlikely(error)) {
+ error = 0;
+ m->count = 0;
+ }
+ if (m->count == m->size)
+ goto Eoverflow;
+ if (pos + m->count > offset) {
+ m->from = offset - pos;
+ m->count -= m->from;
+ m->index = index;
+ break;
+ }
+ pos += m->count;
+ m->count = 0;
+ if (pos == offset) {
+ index++;
+ m->index = index;
+ break;
+ }
+ p = m->op->next(m, p, &index);
+ }
+ m->op->stop(m, p);
+ m->index = index;
+ return error;
+
+Eoverflow:
+ m->op->stop(m, p);
+ kfree(m->buf);
+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+ return !m->buf ? -ENOMEM : -EAGAIN;
+}
+
/**
* seq_read - ->read() method for sequential files.
* @file: the file to read from
@@ -73,6 +139,22 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
int err = 0;
mutex_lock(&m->lock);
+
+ /* Don't assume *ppos is where we left it */
+ if (unlikely(*ppos != m->read_pos)) {
+ m->read_pos = *ppos;
+ while ((err = traverse(m, *ppos)) == -EAGAIN)
+ ;
+ if (err) {
+ /* With prejudice... */
+ m->read_pos = 0;
+ m->version = 0;
+ m->index = 0;
+ m->count = 0;
+ goto Done;
+ }
+ }
+
/*
* seq_file->op->..m_start/m_stop/m_next may do special actions
* or optimisations based on the file->f_version, so we want to
@@ -172,8 +254,10 @@ Fill:
Done:
if (!copied)
copied = err;
- else
+ else {
*ppos += copied;
+ m->read_pos += copied;
+ }
file->f_version = m->version;
mutex_unlock(&m->lock);
return copied;
@@ -186,63 +270,6 @@ Efault:
}
EXPORT_SYMBOL(seq_read);
-static int traverse(struct seq_file *m, loff_t offset)
-{
- loff_t pos = 0, index;
- int error = 0;
- void *p;
-
- m->version = 0;
- index = 0;
- m->count = m->from = 0;
- if (!offset) {
- m->index = index;
- return 0;
- }
- if (!m->buf) {
- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
- if (!m->buf)
- return -ENOMEM;
- }
- p = m->op->start(m, &index);
- while (p) {
- error = PTR_ERR(p);
- if (IS_ERR(p))
- break;
- error = m->op->show(m, p);
- if (error < 0)
- break;
- if (unlikely(error)) {
- error = 0;
- m->count = 0;
- }
- if (m->count == m->size)
- goto Eoverflow;
- if (pos + m->count > offset) {
- m->from = offset - pos;
- m->count -= m->from;
- m->index = index;
- break;
- }
- pos += m->count;
- m->count = 0;
- if (pos == offset) {
- index++;
- m->index = index;
- break;
- }
- p = m->op->next(m, p, &index);
- }
- m->op->stop(m, p);
- return error;
-
-Eoverflow:
- m->op->stop(m, p);
- kfree(m->buf);
- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
- return !m->buf ? -ENOMEM : -EAGAIN;
-}
-
/**
* seq_lseek - ->llseek() method for sequential files.
* @file: the file in question
@@ -265,16 +292,18 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
if (offset < 0)
break;
retval = offset;
- if (offset != file->f_pos) {
+ if (offset != m->read_pos) {
while ((retval=traverse(m, offset)) == -EAGAIN)
;
if (retval) {
/* with extreme prejudice... */
file->f_pos = 0;
+ m->read_pos = 0;
m->version = 0;
m->index = 0;
m->count = 0;
} else {
+ m->read_pos = offset;
retval = file->f_pos = offset;
}
}
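
The seq_file changes above add one piece of state, m->read_pos: the file offset the buffered iterator state actually corresponds to. seq_read() can then honour an arbitrary *ppos from pread() by re-traversing whenever the two disagree, instead of assuming reads resume where the last one stopped. The bookkeeping alone (traverse() reduced to a stub):

struct seq_sketch {
	long long read_pos;	/* offset our cached state matches */
};

static int rebuild_at(struct seq_sketch *m, long long off)
{
	(void)m; (void)off;
	return 0;		/* traverse() analogue; always succeeds here */
}

static int seq_read_enter(struct seq_sketch *m, long long *ppos)
{
	if (*ppos != m->read_pos) {	/* pread() or lseek() moved us */
		if (rebuild_at(m, *ppos))
			return -1;
		m->read_pos = *ppos;
	}
	return 0;			/* after copying: m->read_pos += copied */
}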
diff --git a/fs/super.c b/fs/super.c
index 645e5403f2a..8349ed6b141 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -82,7 +82,22 @@ static struct super_block *alloc_super(struct file_system_type *type)
* lock ordering than usbfs:
*/
lockdep_set_class(&s->s_lock, &type->s_lock_key);
- down_write(&s->s_umount);
+ /*
+ * sget() can have s_umount recursion.
+ *
+ * When it cannot find a suitable sb, it allocates a new
+ * one (this one), and tries again to find a suitable old
+ * one.
+ *
+ * In case that succeeds, it will acquire the s_umount
+ * lock of the old one. Since these are clearly distinct
+ * locks, and this object isn't exposed yet, there's no
+ * risk of deadlocks.
+ *
+ * Annotate this by putting this lock in a different
+ * subclass.
+ */
+ down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
s->s_count = S_BIAS;
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
@@ -301,7 +316,7 @@ void generic_shutdown_super(struct super_block *sb)
/*
* wait for asynchronous fs operations to finish before going further
*/
- async_synchronize_full_special(&sb->s_async_list);
+ async_synchronize_full_domain(&sb->s_async_list);
/* bad name - it should be evict_inodes() */
invalidate_inodes(sb);
@@ -470,7 +485,7 @@ restart:
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- async_synchronize_full_special(&sb->s_async_list);
+ async_synchronize_full_domain(&sb->s_async_list);
if (sb->s_root && (wait || sb->s_dirt))
sb->s_op->sync_fs(sb, wait);
up_read(&sb->s_umount);
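
The down_write_nested() annotation above exists only to keep lockdep quiet: the freshly allocated superblock's s_umount is a distinct, not-yet-visible lock, so holding it while sget() takes an old superblock's s_umount of the same class cannot deadlock. A userspace analogue with two distinct locks of one "class":

#include <pthread.h>

int main(void)
{
	pthread_rwlock_t new_sb, old_sb;	/* same class, distinct objects */

	pthread_rwlock_init(&new_sb, NULL);
	pthread_rwlock_init(&old_sb, NULL);

	pthread_rwlock_wrlock(&new_sb);		/* alloc_super(): private lock */
	pthread_rwlock_wrlock(&old_sb);		/* sget() retry: old sb's lock */
	pthread_rwlock_unlock(&old_sb);
	pthread_rwlock_unlock(&new_sb);
	return 0;
}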
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 6a123b8ff3f..b042bd7034b 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -186,10 +186,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
- if (flags & ~(TFD_CLOEXEC | TFD_NONBLOCK))
- return -EINVAL;
- if (clockid != CLOCK_MONOTONIC &&
- clockid != CLOCK_REALTIME)
+ if ((flags & ~TFD_CREATE_FLAGS) ||
+ (clockid != CLOCK_MONOTONIC &&
+ clockid != CLOCK_REALTIME))
return -EINVAL;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -201,7 +200,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
- flags & (O_CLOEXEC | O_NONBLOCK));
+ flags & TFD_SHARED_FCNTL_FLAGS);
if (ufd < 0)
kfree(ctx);
@@ -219,7 +218,8 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
return -EFAULT;
- if (!timespec_valid(&ktmr.it_value) ||
+ if ((flags & ~TFD_SETTIME_FLAGS) ||
+ !timespec_valid(&ktmr.it_value) ||
!timespec_valid(&ktmr.it_interval))
return -EINVAL;
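
The timerfd hunks above fold the flag checks into the usual mask idiom: collect every supported bit into one constant and reject anything outside it in a single test. Generic form (the FLAG_* values here are made up):

#include <errno.h>

#define FLAG_A		0x1
#define FLAG_B		0x2
#define VALID_FLAGS	(FLAG_A | FLAG_B)

static int check_flags(int flags)
{
	if (flags & ~VALID_FLAGS)	/* any unsupported bit set? */
		return -EINVAL;
	return 0;
}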
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 175f9c590b7..f393620890e 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -689,7 +689,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free)
}
/**
- * ubifs_get_free_space - return amount of free space.
+ * ubifs_get_free_space_nolock - return amount of free space.
* @c: UBIFS file-system description object
*
* This function calculates amount of free space to report to user-space.
@@ -704,16 +704,14 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free)
* traditional file-systems, because they have way less overhead than UBIFS.
* So, to keep users happy, UBIFS tries to take the overhead into account.
*/
-long long ubifs_get_free_space(struct ubifs_info *c)
+long long ubifs_get_free_space_nolock(struct ubifs_info *c)
{
- int min_idx_lebs, rsvd_idx_lebs, lebs;
+ int rsvd_idx_lebs, lebs;
long long available, outstanding, free;
- spin_lock(&c->space_lock);
- min_idx_lebs = c->min_idx_lebs;
- ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c));
+ ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c));
outstanding = c->budg_data_growth + c->budg_dd_growth;
- available = ubifs_calc_available(c, min_idx_lebs);
+ available = ubifs_calc_available(c, c->min_idx_lebs);
/*
* When reporting free space to user-space, UBIFS guarantees that it is
@@ -726,15 +724,14 @@ long long ubifs_get_free_space(struct ubifs_info *c)
* Note, the calculations below are similar to what we have in
* 'do_budget_space()', so refer there for comments.
*/
- if (min_idx_lebs > c->lst.idx_lebs)
- rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs;
+ if (c->min_idx_lebs > c->lst.idx_lebs)
+ rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs;
else
rsvd_idx_lebs = 0;
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
c->lst.taken_empty_lebs;
lebs -= rsvd_idx_lebs;
available += lebs * (c->dark_wm - c->leb_overhead);
- spin_unlock(&c->space_lock);
if (available > outstanding)
free = ubifs_reported_space(c, available - outstanding);
@@ -742,3 +739,21 @@ long long ubifs_get_free_space(struct ubifs_info *c)
free = 0;
return free;
}
+
+/**
+ * ubifs_get_free_space - return amount of free space.
+ * @c: UBIFS file-system description object
+ *
+ * This function calculates and returns the amount of free space to report to
+ * user-space.
+ */
+long long ubifs_get_free_space(struct ubifs_info *c)
+{
+ long long free;
+
+ spin_lock(&c->space_lock);
+ free = ubifs_get_free_space_nolock(c);
+ spin_unlock(&c->space_lock);
+
+ return free;
+}
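
The budget.c split above is the standard _nolock refactor: the heavy lifting moves into ubifs_get_free_space_nolock(), which expects c->space_lock held, and the old entry point becomes a thin lock-taking wrapper, so callers that already hold the lock (like the debugfs dump) can reuse the logic. The shape of the pattern:

#include <pthread.h>

static pthread_mutex_t space_lock = PTHREAD_MUTEX_INITIALIZER;
static long long space_free;

static long long get_free_space_nolock(void)
{
	/* caller must hold space_lock */
	return space_free;
}

static long long get_free_space(void)
{
	long long free;

	pthread_mutex_lock(&space_lock);
	free = get_free_space_nolock();
	pthread_mutex_unlock(&space_lock);
	return free;
}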
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 792c5a16c18..e975bd82f38 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -620,9 +620,11 @@ void dbg_dump_budg(struct ubifs_info *c)
c->dark_wm, c->dead_wm, c->max_idx_node_sz);
printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
c->gc_lnum, c->ihead_lnum);
- for (i = 0; i < c->jhead_cnt; i++)
- printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
- c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
+ /* If we are in R/O mode, journal heads do not exist */
+ if (c->jheads)
+ for (i = 0; i < c->jhead_cnt; i++)
+ printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
+ c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
bud = rb_entry(rb, struct ubifs_bud, rb);
printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
@@ -637,10 +639,7 @@ void dbg_dump_budg(struct ubifs_info *c)
/* Print budgeting predictions */
available = ubifs_calc_available(c, c->min_idx_lebs);
outstanding = c->budg_data_growth + c->budg_dd_growth;
- if (available > outstanding)
- free = ubifs_reported_space(c, available - outstanding);
- else
- free = 0;
+ free = ubifs_get_free_space_nolock(c);
printk(KERN_DEBUG "Budgeting predictions:\n");
printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
available, outstanding, free);
@@ -861,6 +860,65 @@ void dbg_dump_index(struct ubifs_info *c)
}
/**
+ * dbg_save_space_info - save information about flash space.
+ * @c: UBIFS file-system description object
+ *
+ * This function saves information about UBIFS free space, dirty space, etc., in
+ * order to check it later.
+ */
+void dbg_save_space_info(struct ubifs_info *c)
+{
+ struct ubifs_debug_info *d = c->dbg;
+
+ ubifs_get_lp_stats(c, &d->saved_lst);
+
+ spin_lock(&c->space_lock);
+ d->saved_free = ubifs_get_free_space_nolock(c);
+ spin_unlock(&c->space_lock);
+}
+
+/**
+ * dbg_check_space_info - check flash space information.
+ * @c: UBIFS file-system description object
+ *
+ * This function compares current flash space information with the information
+ * which was saved when the 'dbg_save_space_info()' function was called.
+ * Returns zero if the information has not changed, and %-EINVAL if it has
+ * changed.
+ */
+int dbg_check_space_info(struct ubifs_info *c)
+{
+ struct ubifs_debug_info *d = c->dbg;
+ struct ubifs_lp_stats lst;
+ long long avail, free;
+
+ spin_lock(&c->space_lock);
+ avail = ubifs_calc_available(c, c->min_idx_lebs);
+ spin_unlock(&c->space_lock);
+ free = ubifs_get_free_space(c);
+
+ if (free != d->saved_free) {
+ ubifs_err("free space changed from %lld to %lld",
+ d->saved_free, free);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ubifs_msg("saved lprops statistics dump");
+ dbg_dump_lstats(&d->saved_lst);
+ ubifs_get_lp_stats(c, &lst);
+ ubifs_msg("current lprops statistics dump");
+ dbg_dump_lstats(&lst);
+ spin_lock(&c->space_lock);
+ dbg_dump_budg(c);
+ spin_unlock(&c->space_lock);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
* dbg_check_synced_i_size - check synchronized inode size.
* @inode: inode to check
*
@@ -1349,7 +1407,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
* @c: UBIFS file-system description object
* @leaf_cb: called for each leaf node
* @znode_cb: called for each indexing node
- * @priv: private date which is passed to callbacks
+ * @priv: private data which is passed to callbacks
*
* This function walks the UBIFS index and calls the @leaf_cb for each leaf
* node and @znode_cb for each indexing node. Returns zero in case of success
@@ -2409,7 +2467,7 @@ void ubifs_debugging_exit(struct ubifs_info *c)
* Root directory for UBIFS stuff in debugfs. Contains sub-directories which
* contain the stuff specific to particular file-system mounts.
*/
-static struct dentry *debugfs_rootdir;
+static struct dentry *dfs_rootdir;
/**
* dbg_debugfs_init - initialize debugfs file-system.
@@ -2421,9 +2479,9 @@ static struct dentry *debugfs_rootdir;
*/
int dbg_debugfs_init(void)
{
- debugfs_rootdir = debugfs_create_dir("ubifs", NULL);
- if (IS_ERR(debugfs_rootdir)) {
- int err = PTR_ERR(debugfs_rootdir);
+ dfs_rootdir = debugfs_create_dir("ubifs", NULL);
+ if (IS_ERR(dfs_rootdir)) {
+ int err = PTR_ERR(dfs_rootdir);
ubifs_err("cannot create \"ubifs\" debugfs directory, "
"error %d\n", err);
return err;
@@ -2437,7 +2495,7 @@ int dbg_debugfs_init(void)
*/
void dbg_debugfs_exit(void)
{
- debugfs_remove(debugfs_rootdir);
+ debugfs_remove(dfs_rootdir);
}
static int open_debugfs_file(struct inode *inode, struct file *file)
@@ -2452,13 +2510,13 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
struct ubifs_info *c = file->private_data;
struct ubifs_debug_info *d = c->dbg;
- if (file->f_path.dentry == d->dump_lprops)
+ if (file->f_path.dentry == d->dfs_dump_lprops)
dbg_dump_lprops(c);
- else if (file->f_path.dentry == d->dump_budg) {
+ else if (file->f_path.dentry == d->dfs_dump_budg) {
spin_lock(&c->space_lock);
dbg_dump_budg(c);
spin_unlock(&c->space_lock);
- } else if (file->f_path.dentry == d->dump_tnc) {
+ } else if (file->f_path.dentry == d->dfs_dump_tnc) {
mutex_lock(&c->tnc_mutex);
dbg_dump_tnc(c);
mutex_unlock(&c->tnc_mutex);
@@ -2469,7 +2527,7 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations debugfs_fops = {
+static const struct file_operations dfs_fops = {
.open = open_debugfs_file,
.write = write_debugfs_file,
.owner = THIS_MODULE,
@@ -2494,36 +2552,32 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
struct dentry *dent;
struct ubifs_debug_info *d = c->dbg;
- sprintf(d->debugfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- d->debugfs_dir = debugfs_create_dir(d->debugfs_dir_name,
- debugfs_rootdir);
- if (IS_ERR(d->debugfs_dir)) {
- err = PTR_ERR(d->debugfs_dir);
+ sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
+ if (IS_ERR(d->dfs_dir)) {
+ err = PTR_ERR(d->dfs_dir);
ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- d->debugfs_dir_name, err);
+ d->dfs_dir_name, err);
goto out;
}
fname = "dump_lprops";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_lprops = dent;
+ d->dfs_dump_lprops = dent;
fname = "dump_budg";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_budg = dent;
+ d->dfs_dump_budg = dent;
fname = "dump_tnc";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_tnc = dent;
+ d->dfs_dump_tnc = dent;
return 0;
@@ -2531,7 +2585,7 @@ out_remove:
err = PTR_ERR(dent);
ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
fname, err);
- debugfs_remove_recursive(d->debugfs_dir);
+ debugfs_remove_recursive(d->dfs_dir);
out:
return err;
}
@@ -2542,7 +2596,7 @@ out:
*/
void dbg_debugfs_exit_fs(struct ubifs_info *c)
{
- debugfs_remove_recursive(c->dbg->debugfs_dir);
+ debugfs_remove_recursive(c->dbg->dfs_dir);
}
#endif /* CONFIG_UBIFS_FS_DEBUG */
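
dbg_save_space_info()/dbg_check_space_info() above add a snapshot-and-compare invariant check: record the space accounting before an operation that should preserve it, and fail loudly if it silently changed. Reduced to its skeleton:

#include <errno.h>

struct space_snapshot { long long free; };

static void save_space_info(struct space_snapshot *s, long long free_now)
{
	s->free = free_now;
}

static int check_space_info(const struct space_snapshot *s, long long free_now)
{
	if (free_now != s->free) {
		/* dump saved vs. current state here, then fail */
		return -EINVAL;		/* as in the kernel version */
	}
	return 0;
}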
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 9820d6999f7..c1cd73b2e06 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -41,15 +41,17 @@
* @chk_lpt_wastage: used by LPT tree size checker
* @chk_lpt_lebs: used by LPT tree size checker
* @new_nhead_offs: used by LPT tree size checker
- * @new_ihead_lnum: used by debugging to check ihead_lnum
- * @new_ihead_offs: used by debugging to check ihead_offs
+ * @new_ihead_lnum: used by debugging to check @c->ihead_lnum
+ * @new_ihead_offs: used by debugging to check @c->ihead_offs
*
- * debugfs_dir_name: name of debugfs directory containing this file-system's
- * files
- * debugfs_dir: direntry object of the file-system debugfs directory
- * dump_lprops: "dump lprops" debugfs knob
- * dump_budg: "dump budgeting information" debugfs knob
- * dump_tnc: "dump TNC" debugfs knob
+ * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()')
+ * @saved_free: saved free space (used by 'dbg_save_space_info()')
+ *
+ * @dfs_dir_name: name of debugfs directory containing this file-system's files
+ * @dfs_dir: direntry object of the file-system debugfs directory
+ * @dfs_dump_lprops: "dump lprops" debugfs knob
+ * @dfs_dump_budg: "dump budgeting information" debugfs knob
+ * @dfs_dump_tnc: "dump TNC" debugfs knob
*/
struct ubifs_debug_info {
void *buf;
@@ -69,11 +71,14 @@ struct ubifs_debug_info {
int new_ihead_lnum;
int new_ihead_offs;
- char debugfs_dir_name[100];
- struct dentry *debugfs_dir;
- struct dentry *dump_lprops;
- struct dentry *dump_budg;
- struct dentry *dump_tnc;
+ struct ubifs_lp_stats saved_lst;
+ long long saved_free;
+
+ char dfs_dir_name[100];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_dump_lprops;
+ struct dentry *dfs_dump_budg;
+ struct dentry *dfs_dump_tnc;
};
#define ubifs_assert(expr) do { \
@@ -297,7 +302,8 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
dbg_znode_callback znode_cb, void *priv);
/* Checking functions */
-
+void dbg_save_space_info(struct ubifs_info *c);
+int dbg_check_space_info(struct ubifs_info *c);
int dbg_check_lprops(struct ubifs_info *c);
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot);
@@ -439,6 +445,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
#define dbg_old_index_check_init(c, zroot) 0
+#define dbg_save_space_info(c) ({})
+#define dbg_check_space_info(c) 0
#define dbg_check_old_index(c, zroot) 0
#define dbg_check_cats(c) 0
#define dbg_check_ltab(c) 0
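The new dbg_save_space_info()/dbg_check_space_info() pair compiles down to no-ops when CONFIG_UBIFS_FS_DEBUG is off, so callers need no #ifdefs. A hedged sketch of how the pair brackets an operation that must preserve free space, as the remount paths in super.c below do (example_remount is hypothetical):

/* Record lprops statistics and free space, do the work, then verify
 * that the reported free space did not change. */
static int example_remount(struct ubifs_info *c)
{
	dbg_save_space_info(c);

	/* ... remount work that must not alter the free space figures ... */

	return dbg_check_space_info(c);
}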
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index f448ab1f9c3..f55d523c52b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -482,30 +482,29 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
}
/**
- * lock_2_inodes - lock two UBIFS inodes.
+ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
*/
static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
{
- if (inode1->i_ino < inode2->i_ino) {
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_2);
- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_3);
- } else {
- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_3);
- }
+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
}
/**
- * unlock_2_inodes - unlock two UBIFS inodes inodes.
+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
*/
static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
{
- mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
@@ -527,6 +526,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, inode->i_ino,
inode->i_nlink, dir->i_ino);
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = dbg_check_synced_i_size(inode);
if (err)
return err;
@@ -580,6 +581,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, inode->i_ino,
inode->i_nlink, dir->i_ino);
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = dbg_check_synced_i_size(inode);
if (err)
return err;
@@ -667,7 +670,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
dentry->d_name.name, inode->i_ino, dir->i_ino);
-
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = check_dir_empty(c, dentry->d_inode);
if (err)
return err;
@@ -922,59 +926,30 @@ out_budg:
}
/**
- * lock_3_inodes - lock three UBIFS inodes for rename.
+ * lock_3_inodes - a wrapper for locking three UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
*
- * For 'ubifs_rename()', @inode1 may be the same as @inode2 whereas @inode3 may
- * be null.
+ * This function is used for 'ubifs_rename()' and @inode1 may be the same as
+ * @inode2 whereas @inode3 may be %NULL.
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
*/
static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
{
- struct inode *i1, *i2, *i3;
-
- if (!inode3) {
- if (inode1 != inode2) {
- lock_2_inodes(inode1, inode2);
- return;
- }
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
- return;
- }
-
- if (inode1 == inode2) {
- lock_2_inodes(inode1, inode3);
- return;
- }
-
- /* 3 different inodes */
- if (inode1 < inode2) {
- i3 = inode2;
- if (inode1 < inode3) {
- i1 = inode1;
- i2 = inode3;
- } else {
- i1 = inode3;
- i2 = inode1;
- }
- } else {
- i3 = inode1;
- if (inode2 < inode3) {
- i1 = inode2;
- i2 = inode3;
- } else {
- i1 = inode3;
- i2 = inode2;
- }
- }
- mutex_lock_nested(&ubifs_inode(i1)->ui_mutex, WB_MUTEX_1);
- lock_2_inodes(i2, i3);
+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+ if (inode2 != inode1)
+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
+ if (inode3)
+ mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3);
}
/**
- * unlock_3_inodes - unlock three UBIFS inodes for rename.
+ * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
@@ -982,11 +957,11 @@ static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
static void unlock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
{
- mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
- if (inode1 != inode2)
- mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
if (inode3)
mutex_unlock(&ubifs_inode(inode3)->ui_mutex);
+ if (inode1 != inode2)
+ mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -1020,6 +995,11 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
"dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name,
old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
new_dentry->d_name.name, new_dir->i_ino);
+ ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
+ if (unlink)
+ ubifs_assert(mutex_is_locked(&new_inode->i_mutex));
+
if (unlink && is_dir) {
err = check_dir_empty(c, new_inode);
@@ -1199,7 +1179,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-struct inode_operations ubifs_dir_inode_operations = {
+const struct inode_operations ubifs_dir_inode_operations = {
.lookup = ubifs_lookup,
.create = ubifs_create,
.link = ubifs_link,
@@ -1219,7 +1199,7 @@ struct inode_operations ubifs_dir_inode_operations = {
#endif
};
-struct file_operations ubifs_dir_operations = {
+const struct file_operations ubifs_dir_operations = {
.llseek = ubifs_dir_llseek,
.release = ubifs_dir_release,
.read = generic_read_dir,
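The simplified lock wrappers above work because the VFS has already taken the @i_mutex locks in a safe order; the new ubifs_assert() calls in ubifs_link(), ubifs_unlink(), ubifs_rmdir() and ubifs_rename() make that contract explicit. A minimal sketch of a two-inode operation under the same assumption (example_two_inode_op is hypothetical):

/* VFS ordering of i_mutex is assumed, so the ui_mutex locks can be
 * nested with fixed lockdep subclasses instead of inode-number order. */
static int example_two_inode_op(struct inode *dir, struct inode *inode)
{
	ubifs_assert(mutex_is_locked(&dir->i_mutex));
	ubifs_assert(mutex_is_locked(&inode->i_mutex));

	lock_2_inodes(dir, inode);
	/* ... update both inodes under their ui_mutex locks ... */
	unlock_2_inodes(dir, inode);
	return 0;
}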
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index bf37374567f..93b6de51f26 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -432,7 +432,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
struct page *page;
-
ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
if (unlikely(c->ro_media))
@@ -1541,7 +1540,7 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-struct address_space_operations ubifs_file_address_operations = {
+const struct address_space_operations ubifs_file_address_operations = {
.readpage = ubifs_readpage,
.writepage = ubifs_writepage,
.write_begin = ubifs_write_begin,
@@ -1551,7 +1550,7 @@ struct address_space_operations ubifs_file_address_operations = {
.releasepage = ubifs_releasepage,
};
-struct inode_operations ubifs_file_inode_operations = {
+const struct inode_operations ubifs_file_inode_operations = {
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
@@ -1562,14 +1561,14 @@ struct inode_operations ubifs_file_inode_operations = {
#endif
};
-struct inode_operations ubifs_symlink_inode_operations = {
+const struct inode_operations ubifs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
};
-struct file_operations ubifs_file_operations = {
+const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 9832f9abe28..a711d33b3d3 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -31,6 +31,26 @@
* to be reused. Garbage collection will cause the number of dirty index nodes
* to grow, however sufficient space is reserved for the index to ensure the
* commit will never run out of space.
+ *
+ * Notes about the dead watermark. In the current UBIFS implementation we
+ * assume that LEBs which have less than @c->dead_wm bytes of free + dirty
+ * space are full and not worth garbage-collecting. The dead watermark is one
+ * min. I/O unit size, or the min. UBIFS node size, whichever is greater.
+ * Indeed, the UBIFS Garbage Collector has to synchronize the GC head's write
+ * buffer before returning, so this amounts to wasting one min. I/O unit.
+ * However, UBIFS GC can actually reclaim even very small pieces of dirty
+ * space by garbage-collecting enough dirty LEBs, but we do not bother doing
+ * this in this implementation.
+ *
+ * Notes about the dark watermark. The result of GC work depends on how big
+ * the UBIFS nodes GC deals with are. Large nodes make GC waste more space.
+ * Indeed, if GC moves data from LEB A to LEB B and the nodes in LEB A are
+ * large, GC would have to waste large pieces of free space at the end of
+ * LEB B, because nodes from LEB A would not fit. And the worst situation is
+ * when all nodes are of maximum size. So the dark watermark is the amount of
+ * free + dirty space in a LEB which is guaranteed to be reclaimable. If a LEB
+ * has less space, GC might be unable to reclaim it. So, LEBs with free +
+ * dirty greater than the dark watermark are "good" LEBs from GC's point of
+ * view. The other LEBs are not so good, and GC takes extra care when moving
+ * them.
*/
#include <linux/pagemap.h>
@@ -381,7 +401,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
/*
* Don't release the LEB until after the next commit, because
- * it may contain date which is needed for recovery. So
+ * it may contain data which is needed for recovery. So
* although we freed this LEB, it will become usable only after
* the commit.
*/
@@ -810,8 +830,9 @@ out:
* ubifs_destroy_idx_gc - destroy idx_gc list.
* @c: UBIFS file-system description object
*
- * This function destroys the idx_gc list. It is called when unmounting or
- * remounting read-only so locks are not needed.
+ * This function destroys the @c->idx_gc list. It is called when unmounting,
+ * so locks are not needed.
*/
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
@@ -824,7 +845,6 @@ void ubifs_destroy_idx_gc(struct ubifs_info *c)
list_del(&idx_gc->list);
kfree(idx_gc);
}
-
}
/**
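For reference, the dead and dark watermarks described in the comment added above are computed in init_constants_early() in super.c (also touched later in this diff); a condensed sketch of just those two assignments:

/* Condensed from init_constants_early(): both watermarks are rounded
 * up to the min. I/O unit size. */
static void example_init_watermarks(struct ubifs_info *c)
{
	/* dead: one min. I/O unit, or the min. node size if greater */
	c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
	/* dark: the maximum UBIFS node size */
	c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
}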
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 01682713af6..e8e632a1dcd 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -29,7 +29,7 @@
* would have been wasted for padding to the nearest minimal I/O unit boundary.
* Instead, data first goes to the write-buffer and is flushed when the
* buffer is full or when it is not used for some time (by timer). This is
- * similarto the mechanism is used by JFFS2.
+ * similar to the mechanism used by JFFS2.
*
* Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
* mutexes defined inside these objects. Since sometimes upper-level code
@@ -75,7 +75,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
* @lnum: logical eraseblock number
* @offs: offset within the logical eraseblock
* @quiet: print no messages
- * @chk_crc: indicates whether to always check the CRC
+ * @must_chk_crc: indicates whether to always check the CRC
*
* This function checks node magic number and CRC checksum. This function also
* validates node length to prevent UBIFS from becoming crazy when an attacker
@@ -83,11 +83,17 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
* node length in the common header could cause UBIFS to read memory outside of
* allocated buffer when checking the CRC checksum.
*
- * This function returns zero in case of success %-EUCLEAN in case of bad CRC
- * or magic.
+ * This function may skip the CRC checking of data nodes if
+ * @c->no_chk_data_crc is true, which is controlled by the corresponding
+ * UBIFS mount option. However, if
+ * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
+ * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is
+ * ignored and CRC is checked.
+ *
+ * This function returns zero in case of success and %-EUCLEAN in case of bad
+ * CRC or magic.
*/
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
- int offs, int quiet, int chk_crc)
+ int offs, int quiet, int must_chk_crc)
{
int err = -EINVAL, type, node_len;
uint32_t crc, node_crc, magic;
@@ -123,9 +129,9 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
node_len > c->ranges[type].max_len)
goto out_len;
- if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc)
- if (c->no_chk_data_crc)
- return 0;
+ if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc &&
+ c->no_chk_data_crc)
+ return 0;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
node_crc = le32_to_cpu(ch->crc);
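The rewritten condition in ubifs_check_node() reads more clearly when pulled out as a predicate; a hedged sketch (may_skip_data_crc is hypothetical, the flags are the real ones from struct ubifs_info):

/* A data node's CRC may be skipped only when the caller does not
 * insist on it, the mount option allows it, and no global override
 * (such as remounting) forces checking. */
static int may_skip_data_crc(const struct ubifs_info *c, int type,
			     int must_chk_crc)
{
	return !must_chk_crc && type == UBIFS_DATA_NODE &&
	       !c->always_chk_crc && c->no_chk_data_crc;
}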
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 9b7c54e0cd2..a11ca0958a2 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -208,7 +208,7 @@ again:
offs = 0;
out:
- err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, UBI_SHORTTERM);
+ err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
if (err)
goto out_unlock;
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index dfd2bcece27..4cdd284dea5 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -635,10 +635,10 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
* @c: UBIFS file-system description object
* @st: return statistics
*/
-void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *st)
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst)
{
spin_lock(&c->space_lock);
- memcpy(st, &c->lst, sizeof(struct ubifs_lp_stats));
+ memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats));
spin_unlock(&c->space_lock);
}
@@ -678,6 +678,9 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
+ if (err)
+ ubifs_err("cannot change properties of LEB %d, error %d",
+ lnum, err);
return err;
}
@@ -714,6 +717,9 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
+ if (err)
+ ubifs_err("cannot update properties of LEB %d, error %d",
+ lnum, err);
return err;
}
@@ -737,6 +743,8 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp)
lpp = ubifs_lpt_lookup(c, lnum);
if (IS_ERR(lpp)) {
err = PTR_ERR(lpp);
+ ubifs_err("cannot read properties of LEB %d, error %d",
+ lnum, err);
goto out;
}
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 96ca9570717..3216a1f277f 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -556,23 +556,23 @@ no_space:
}
/**
- * next_pnode - find next pnode.
+ * next_pnode_to_dirty - find next pnode to dirty.
* @c: UBIFS file-system description object
* @pnode: pnode
*
- * This function returns the next pnode or %NULL if there are no more pnodes.
+ * This function returns the next pnode to dirty or %NULL if there are no more
+ * pnodes. Note that pnodes that have never been written (lnum == 0) are
+ * skipped.
*/
-static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
- struct ubifs_pnode *pnode)
+static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
+ struct ubifs_pnode *pnode)
{
struct ubifs_nnode *nnode;
int iip;
/* Try to go right */
nnode = pnode->parent;
- iip = pnode->iip + 1;
- if (iip < UBIFS_LPT_FANOUT) {
- /* We assume here that LEB zero is never an LPT LEB */
+ for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
if (nnode->nbranch[iip].lnum)
return ubifs_get_pnode(c, nnode, iip);
}
@@ -583,8 +583,11 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
nnode = nnode->parent;
if (!nnode)
return NULL;
- /* We assume here that LEB zero is never an LPT LEB */
- } while (iip >= UBIFS_LPT_FANOUT || !nnode->nbranch[iip].lnum);
+ for (; iip < UBIFS_LPT_FANOUT; iip++) {
+ if (nnode->nbranch[iip].lnum)
+ break;
+ }
+ } while (iip >= UBIFS_LPT_FANOUT);
/* Go right */
nnode = ubifs_get_nnode(c, nnode, iip);
@@ -593,12 +596,29 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
/* Go down to level 1 */
while (nnode->level > 1) {
- nnode = ubifs_get_nnode(c, nnode, 0);
+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) {
+ if (nnode->nbranch[iip].lnum)
+ break;
+ }
+ if (iip >= UBIFS_LPT_FANOUT) {
+ /*
+ * Should not happen, but we need to keep going
+ * if it does.
+ */
+ iip = 0;
+ }
+ nnode = ubifs_get_nnode(c, nnode, iip);
if (IS_ERR(nnode))
return (void *)nnode;
}
- return ubifs_get_pnode(c, nnode, 0);
+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++)
+ if (nnode->nbranch[iip].lnum)
+ break;
+ if (iip >= UBIFS_LPT_FANOUT)
+ /* Should not happen, but we need to keep going if it does */
+ iip = 0;
+ return ubifs_get_pnode(c, nnode, iip);
}
/**
@@ -688,7 +708,7 @@ static int make_tree_dirty(struct ubifs_info *c)
pnode = pnode_lookup(c, 0);
while (pnode) {
do_make_pnode_dirty(c, pnode);
- pnode = next_pnode(c, pnode);
+ pnode = next_pnode_to_dirty(c, pnode);
if (IS_ERR(pnode))
return PTR_ERR(pnode);
}
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 71d5493bf56..a88f33801b9 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -354,7 +354,7 @@ int ubifs_write_master(struct ubifs_info *c)
int err, lnum, offs, len;
if (c->ro_media)
- return -EINVAL;
+ return -EROFS;
lnum = UBIFS_MST_LNUM;
offs = c->mst_offs + c->mst_node_alsz;
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 9e6f403f170..152a7b34a14 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -46,7 +46,7 @@
* Orphans are accumulated in a rb-tree. When an inode's link count drops to
* zero, the inode number is added to the rb-tree. It is removed from the tree
* when the inode is deleted. Any new orphans that are in the orphan tree when
- * the commit is run, are written to the orphan area in 1 or more orph nodes.
+ * the commit is run, are written to the orphan area in 1 or more orphan nodes.
* If the orphan area is full, it is consolidated to make space. There is
* always enough space because validation prevents the user from creating more
* than the maximum number of orphans allowed.
@@ -231,7 +231,7 @@ static int tot_avail_orphs(struct ubifs_info *c)
}
/**
- * do_write_orph_node - write a node
+ * do_write_orph_node - write a node to the orphan head.
* @c: UBIFS file-system description object
* @len: length of node
* @atomic: write atomically
@@ -264,11 +264,11 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
}
/**
- * write_orph_node - write an orph node
+ * write_orph_node - write an orphan node.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
- * This function builds an orph node from the cnext list and writes it to the
+ * This function builds an orphan node from the cnext list and writes it to the
* orphan head. On success, %0 is returned, otherwise a negative error code
* is returned.
*/
@@ -326,11 +326,11 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
}
/**
- * write_orph_nodes - write orph nodes until there are no more to commit
+ * write_orph_nodes - write orphan nodes until there are no more to commit.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
- * This function writes orph nodes for all the orphans to commit. On success,
+ * This function writes orphan nodes for all the orphans to commit. On success,
* %0 is returned, otherwise a negative error code is returned.
*/
static int write_orph_nodes(struct ubifs_info *c, int atomic)
@@ -478,14 +478,14 @@ int ubifs_orphan_end_commit(struct ubifs_info *c)
}
/**
- * clear_orphans - erase all LEBs used for orphans.
+ * ubifs_clear_orphans - erase all LEBs used for orphans.
* @c: UBIFS file-system description object
*
* If recovery is not required, then the orphans from the previous session
* are not needed. This function locates the LEBs used to record
* orphans, and un-maps them.
*/
-static int clear_orphans(struct ubifs_info *c)
+int ubifs_clear_orphans(struct ubifs_info *c)
{
int lnum, err;
@@ -547,9 +547,9 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
* do_kill_orphans - remove orphan inodes from the index.
* @c: UBIFS file-system description object
* @sleb: scanned LEB
- * @last_cmt_no: cmt_no of last orph node read is passed and returned here
+ * @last_cmt_no: cmt_no of last orphan node read is passed and returned here
* @outofdate: whether the LEB is out of date is returned here
- * @last_flagged: whether the end orph node is encountered
+ * @last_flagged: whether the end orphan node is encountered
*
* This function is a helper to the 'kill_orphans()' function. It goes through
* every orphan node in a LEB and for every inode number recorded, removes
@@ -580,8 +580,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
/*
* The commit number on the master node may be less, because
* of a failed commit. If there are several failed commits in a
- * row, the commit number written on orph nodes will continue to
- * increase (because the commit number is adjusted here) even
+ * row, the commit number written on orphan nodes will continue
+ * to increase (because the commit number is adjusted here) even
* though the commit number on the master node stays the same
* because the master node has not been re-written.
*/
@@ -589,9 +589,9 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
c->cmt_no = cmt_no;
if (cmt_no < *last_cmt_no && *last_flagged) {
/*
- * The last orph node had a higher commit number and was
- * flagged as the last written for that commit number.
- * That makes this orph node, out of date.
+ * The last orphan node had a higher commit number and
+ * was flagged as the last written for that commit
+ * number. That makes this orphan node out of date.
*/
if (!first) {
ubifs_err("out of order commit number %llu in "
@@ -658,10 +658,10 @@ static int kill_orphans(struct ubifs_info *c)
/*
* Orph nodes always start at c->orph_first and are written to each
* successive LEB in turn. Generally unused LEBs will have been unmapped
- * but may contain out of date orph nodes if the unmap didn't go
- * through. In addition, the last orph node written for each commit is
+ * but may contain out of date orphan nodes if the unmap didn't go
+ * through. In addition, the last orphan node written for each commit is
* marked (top bit of orph->cmt_no is set to 1). It is possible that
- * there are orph nodes from the next commit (i.e. the commit did not
+ * there are orphan nodes from the next commit (i.e. the commit did not
* complete successfully). In that case, no orphans will have been lost
* due to the way that orphans are written, and any orphans added will
* be valid orphans anyway and so can be deleted.
@@ -718,7 +718,7 @@ int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only)
if (unclean)
err = kill_orphans(c);
else if (!read_only)
- err = clear_orphans(c);
+ err = ubifs_clear_orphans(c);
return err;
}
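The out-of-date detection described in do_kill_orphans() relies on the top bit of the on-flash cmt_no field marking the last orphan node written for a commit. A sketch of how a reader separates the two pieces of information (example_decode_cmt_no is hypothetical; the field layout follows the comments above):

#include <linux/kernel.h>	/* LLONG_MAX */

static void example_decode_cmt_no(const struct ubifs_orph_node *orph,
				  unsigned long long *cmt_no, int *last)
{
	/* Low 63 bits: the commit number itself */
	*cmt_no = le64_to_cpu(orph->cmt_no) & LLONG_MAX;
	/* Top bit: set on the last orphan node of that commit */
	*last = !!(le64_to_cpu(orph->cmt_no) & (1ULL << 63));
}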
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 89556ee7251..1182b66a549 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -397,6 +397,7 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = UBIFS_MAX_NLEN;
buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]);
buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]);
+ ubifs_assert(buf->f_bfree <= c->block_cnt);
return 0;
}
@@ -432,33 +433,24 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
int i, err;
struct ubifs_info *c = sb->s_fs_info;
struct writeback_control wbc = {
- .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .sync_mode = WB_SYNC_ALL,
.range_start = 0,
.range_end = LLONG_MAX,
.nr_to_write = LONG_MAX,
};
/*
- * Note by akpm about WB_SYNC_NONE used above: zero @wait is just an
- * advisory thing to help the file system shove lots of data into the
- * queues. If some gets missed then it'll be picked up on the second
+ * Zero @wait is just an advisory thing to help the file system shove
+ * lots of data into the queues, and there will be a second
* '->sync_fs()' call, with non-zero @wait.
*/
+ if (!wait)
+ return 0;
if (sb->s_flags & MS_RDONLY)
return 0;
/*
- * Synchronize write buffers, because 'ubifs_run_commit()' does not
- * do this if it waits for an already running commit.
- */
- for (i = 0; i < c->jhead_cnt; i++) {
- err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
- if (err)
- return err;
- }
-
- /*
* VFS calls '->sync_fs()' before synchronizing all dirty inodes and
* pages, so synchronize them first, then commit the journal. Strictly
* speaking, it is not necessary to commit the journal here,
@@ -469,6 +461,16 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
*/
generic_sync_sb_inodes(sb, &wbc);
+ /*
+ * Synchronize write buffers, because 'ubifs_run_commit()' does not
+ * do this if it waits for an already running commit.
+ */
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ return err;
+ }
+
err = ubifs_run_commit(c);
if (err)
return err;
@@ -572,15 +574,8 @@ static int init_constants_early(struct ubifs_info *c)
c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX;
/*
- * Initialize dead and dark LEB space watermarks.
- *
- * Dead space is the space which cannot be used. Its watermark is
- * equivalent to min. I/O unit or minimum node size if it is greater
- * then min. I/O unit.
- *
- * Dark space is the space which might be used, or might not, depending
- * on which node should be written to the LEB. Its watermark is
- * equivalent to maximum UBIFS node size.
+ * Initialize dead and dark LEB space watermarks. See gc.c for comments
+ * about these values.
*/
c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
@@ -741,12 +736,12 @@ static void init_constants_master(struct ubifs_info *c)
* take_gc_lnum - reserve GC LEB.
* @c: UBIFS file-system description object
*
- * This function ensures that the LEB reserved for garbage collection is
- * unmapped and is marked as "taken" in lprops. We also have to set free space
- * to LEB size and dirty space to zero, because lprops may contain out-of-date
- * information if the file-system was un-mounted before it has been committed.
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function ensures that the LEB reserved for garbage collection is marked
+ * as "taken" in lprops. We also have to set free space to LEB size and dirty
+ * space to zero, because lprops may contain out-of-date information if the
+ * file-system was un-mounted before it has been committed. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
*/
static int take_gc_lnum(struct ubifs_info *c)
{
@@ -757,10 +752,6 @@ static int take_gc_lnum(struct ubifs_info *c)
return -EINVAL;
}
- err = ubifs_leb_unmap(c, c->gc_lnum);
- if (err)
- return err;
-
/* And we have to tell lprops that this LEB is taken */
err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0,
LPROPS_TAKEN, 0, 0);
@@ -966,13 +957,16 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
token = match_token(p, tokens, args);
switch (token) {
+ /*
+ * %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
+ * We accept them in order to be backward-compatible, but this
+ * should be removed at some point.
+ */
case Opt_fast_unmount:
c->mount_opts.unmount_mode = 2;
- c->fast_unmount = 1;
break;
case Opt_norm_unmount:
c->mount_opts.unmount_mode = 1;
- c->fast_unmount = 0;
break;
case Opt_bulk_read:
c->mount_opts.bulk_read = 2;
@@ -1094,12 +1088,7 @@ static int check_free_space(struct ubifs_info *c)
ubifs_err("insufficient free space to mount in read/write mode");
dbg_dump_budg(c);
dbg_dump_lprops(c);
- /*
- * We return %-EINVAL instead of %-ENOSPC because it seems to
- * be the closest error code mentioned in the mount function
- * documentation.
- */
- return -EINVAL;
+ return -ENOSPC;
}
return 0;
}
@@ -1286,10 +1275,19 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_orphans;
err = ubifs_rcvry_gc_commit(c);
- } else
+ } else {
err = take_gc_lnum(c);
- if (err)
- goto out_orphans;
+ if (err)
+ goto out_orphans;
+
+ /*
+ * GC LEB may contain garbage if there was an unclean
+ * reboot, and it should be un-mapped.
+ */
+ err = ubifs_leb_unmap(c, c->gc_lnum);
+ if (err)
+ return err;
+ }
err = dbg_check_lprops(c);
if (err)
@@ -1298,6 +1296,16 @@ static int mount_ubifs(struct ubifs_info *c)
err = ubifs_recover_size(c);
if (err)
goto out_orphans;
+ } else {
+ /*
+ * Even if we mount read-only, we have to set the space in the GC
+ * LEB to a proper value because this affects UBIFS free space
+ * reporting. We do not want a situation where re-mounting from
+ * R/O to R/W changes the amount of free space.
+ */
+ err = take_gc_lnum(c);
+ if (err)
+ goto out_orphans;
}
spin_lock(&ubifs_infos_lock);
@@ -1310,14 +1318,17 @@ static int mount_ubifs(struct ubifs_info *c)
else {
c->need_recovery = 0;
ubifs_msg("recovery completed");
+ /* GC LEB has to be empty and taken at this point */
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
}
- }
+ } else
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
- err = dbg_debugfs_init_fs(c);
+ err = dbg_check_filesystem(c);
if (err)
goto out_infos;
- err = dbg_check_filesystem(c);
+ err = dbg_debugfs_init_fs(c);
if (err)
goto out_infos;
@@ -1351,7 +1362,6 @@ static int mount_ubifs(struct ubifs_info *c)
c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
- dbg_msg("fast unmount: %d", c->fast_unmount);
dbg_msg("big_lpt %d", c->big_lpt);
dbg_msg("log LEBs: %d (%d - %d)",
c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
@@ -1475,10 +1485,8 @@ static int ubifs_remount_rw(struct ubifs_info *c)
{
int err, lnum;
- if (c->ro_media)
- return -EINVAL;
-
mutex_lock(&c->umount_mutex);
+ dbg_save_space_info(c);
c->remounting_rw = 1;
c->always_chk_crc = 1;
@@ -1514,6 +1522,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out;
+ } else {
+ /* A readonly mount is not allowed to have orphans */
+ ubifs_assert(c->tot_orphans == 0);
+ err = ubifs_clear_orphans(c);
+ if (err)
+ goto out;
}
if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) {
@@ -1569,7 +1583,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (c->need_recovery)
err = ubifs_rcvry_gc_commit(c);
else
- err = take_gc_lnum(c);
+ err = ubifs_leb_unmap(c, c->gc_lnum);
if (err)
goto out;
@@ -1582,8 +1596,9 @@ static int ubifs_remount_rw(struct ubifs_info *c)
c->vfs_sb->s_flags &= ~MS_RDONLY;
c->remounting_rw = 0;
c->always_chk_crc = 0;
+ err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
- return 0;
+ return err;
out:
vfree(c->orph_buf);
@@ -1603,43 +1618,18 @@ out:
}
/**
- * commit_on_unmount - commit the journal when un-mounting.
- * @c: UBIFS file-system description object
- *
- * This function is called during un-mounting and re-mounting, and it commits
- * the journal unless the "fast unmount" mode is enabled.
- */
-static void commit_on_unmount(struct ubifs_info *c)
-{
- struct super_block *sb = c->vfs_sb;
- long long bud_bytes;
-
- /*
- * This function is called before the background thread is stopped, so
- * we may race with ongoing commit, which means we have to take
- * @c->bud_lock to access @c->bud_bytes.
- */
- spin_lock(&c->buds_lock);
- bud_bytes = c->bud_bytes;
- spin_unlock(&c->buds_lock);
-
- if (!c->fast_unmount && !(sb->s_flags & MS_RDONLY) && bud_bytes)
- ubifs_run_commit(c);
-}
-
-/**
* ubifs_remount_ro - re-mount in read-only mode.
* @c: UBIFS file-system description object
*
- * We rely on VFS to have stopped writing. Possibly the background thread could
- * be running a commit, however kthread_stop will wait in that case.
+ * We assume VFS has stopped writing. Possibly the background thread could be
+ * running a commit; however, kthread_stop will wait in that case.
*/
static void ubifs_remount_ro(struct ubifs_info *c)
{
int i, err;
ubifs_assert(!c->need_recovery);
- commit_on_unmount(c);
+ ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
mutex_lock(&c->umount_mutex);
if (c->bgt) {
@@ -1647,27 +1637,29 @@ static void ubifs_remount_ro(struct ubifs_info *c)
c->bgt = NULL;
}
+ dbg_save_space_info(c);
+
for (i = 0; i < c->jhead_cnt; i++) {
ubifs_wbuf_sync(&c->jheads[i].wbuf);
del_timer_sync(&c->jheads[i].wbuf.timer);
}
- if (!c->ro_media) {
- c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
- c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
- c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
- err = ubifs_write_master(c);
- if (err)
- ubifs_ro_mode(c, err);
- }
+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
+ err = ubifs_write_master(c);
+ if (err)
+ ubifs_ro_mode(c, err);
- ubifs_destroy_idx_gc(c);
free_wbufs(c);
vfree(c->orph_buf);
c->orph_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
+ err = dbg_check_space_info(c);
+ if (err)
+ ubifs_ro_mode(c, err);
mutex_unlock(&c->umount_mutex);
}
@@ -1760,11 +1752,20 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
}
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+ if (c->ro_media) {
+ ubifs_msg("cannot re-mount due to prior errors");
+ return -EROFS;
+ }
err = ubifs_remount_rw(c);
if (err)
return err;
- } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
+ } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
+ if (c->ro_media) {
+ ubifs_msg("cannot re-mount due to prior errors");
+ return -EROFS;
+ }
ubifs_remount_ro(c);
+ }
if (c->bulk_read == 1)
bu_init(c);
@@ -1774,10 +1775,11 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
c->bu.buf = NULL;
}
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
return 0;
}
-struct super_operations ubifs_super_operations = {
+const struct super_operations ubifs_super_operations = {
.alloc_inode = ubifs_alloc_inode,
.destroy_inode = ubifs_destroy_inode,
.put_super = ubifs_put_super,
@@ -2044,15 +2046,6 @@ out_close:
static void ubifs_kill_sb(struct super_block *sb)
{
- struct ubifs_info *c = sb->s_fs_info;
-
- /*
- * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()'
- * in order to be outside BKL.
- */
- if (sb->s_root)
- commit_on_unmount(c);
- /* The un-mount routine is actually done in put_super() */
generic_shutdown_super(sb);
}
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index f7e36f54552..fa28a84c6a1 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -443,6 +443,11 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
* This function performs that same function as ubifs_read_node except that
* it does not require that there is actually a node present and instead
* the return code indicates if a node was read.
+ *
+ * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
+ * is true (it is controlled by the corresponding mount option). However, if
+ * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always
+ * checked.
*/
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
int len, int lnum, int offs)
@@ -470,9 +475,8 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
if (node_len != len)
return 0;
- if (type == UBIFS_DATA_NODE && !c->always_chk_crc)
- if (c->no_chk_data_crc)
- return 0;
+ if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
+ return 1;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
node_crc = le32_to_cpu(ch->crc);
@@ -1506,7 +1510,7 @@ out:
*
* Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
* makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
- * maxumum possible amount of nodes for bulk-read.
+ * maximum possible amount of nodes for bulk-read.
*/
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index fc2a4cc66d0..039a68bee29 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -426,9 +426,9 @@ struct ubifs_unclean_leb {
* LEB properties flags.
*
* LPROPS_UNCAT: not categorized
- * LPROPS_DIRTY: dirty > 0, not index
+ * LPROPS_DIRTY: dirty > free, dirty >= @c->dead_wm, not index
* LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sze and index
- * LPROPS_FREE: free > 0, not empty, not index
+ * LPROPS_FREE: free > 0, dirty < @c->dead_wm, not empty, not index
* LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs
* LPROPS_EMPTY: LEB is empty, not taken
* LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken
@@ -961,7 +961,6 @@ struct ubifs_debug_info;
* @cs_lock: commit state lock
* @cmt_wq: wait queue to sleep on if the log is full and a commit is running
*
- * @fast_unmount: do not run journal commit before un-mounting
* @big_lpt: flag that LPT is too big to write whole during commit
* @no_chk_data_crc: do not check CRCs when reading data nodes (except during
* recovery)
@@ -1202,7 +1201,6 @@ struct ubifs_info {
spinlock_t cs_lock;
wait_queue_head_t cmt_wq;
- unsigned int fast_unmount:1;
unsigned int big_lpt:1;
unsigned int no_chk_data_crc:1;
unsigned int bulk_read:1;
@@ -1405,13 +1403,13 @@ extern struct list_head ubifs_infos;
extern spinlock_t ubifs_infos_lock;
extern atomic_long_t ubifs_clean_zn_cnt;
extern struct kmem_cache *ubifs_inode_slab;
-extern struct super_operations ubifs_super_operations;
-extern struct address_space_operations ubifs_file_address_operations;
-extern struct file_operations ubifs_file_operations;
-extern struct inode_operations ubifs_file_inode_operations;
-extern struct file_operations ubifs_dir_operations;
-extern struct inode_operations ubifs_dir_inode_operations;
-extern struct inode_operations ubifs_symlink_inode_operations;
+extern const struct super_operations ubifs_super_operations;
+extern const struct address_space_operations ubifs_file_address_operations;
+extern const struct file_operations ubifs_file_operations;
+extern const struct inode_operations ubifs_file_inode_operations;
+extern const struct file_operations ubifs_dir_operations;
+extern const struct inode_operations ubifs_dir_inode_operations;
+extern const struct inode_operations ubifs_symlink_inode_operations;
extern struct backing_dev_info ubifs_backing_dev_info;
extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
@@ -1428,7 +1426,7 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
int offs, int dtype);
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
- int offs, int quiet, int chk_crc);
+ int offs, int quiet, int must_chk_crc);
void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
int ubifs_io_init(struct ubifs_info *c);
@@ -1495,6 +1493,7 @@ void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode,
void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode,
struct ubifs_budget_req *req);
long long ubifs_get_free_space(struct ubifs_info *c);
+long long ubifs_get_free_space_nolock(struct ubifs_info *c);
int ubifs_calc_min_idx_lebs(struct ubifs_info *c);
void ubifs_convert_page_budget(struct ubifs_info *c);
long long ubifs_reported_space(const struct ubifs_info *c, long long free);
@@ -1603,6 +1602,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum);
int ubifs_orphan_start_commit(struct ubifs_info *c);
int ubifs_orphan_end_commit(struct ubifs_info *c);
int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only);
+int ubifs_clear_orphans(struct ubifs_info *c);
/* lpt.c */
int ubifs_calc_lpt_geom(struct ubifs_info *c);
@@ -1646,7 +1646,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
const struct ubifs_lprops *lp,
int free, int dirty, int flags,
int idx_gc_cnt);
-void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats);
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst);
void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
int cat);
void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d71dc44e21e..cb329edc925 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,6 +166,75 @@ test_page_region(
}
/*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+ void *vm_addr;
+ struct a_list *next;
+} a_list_t;
+
+static a_list_t *as_free_head;
+static int as_list_len;
+static DEFINE_SPINLOCK(as_lock);
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+ void *addr)
+{
+ a_list_t *aentry;
+
+#ifdef CONFIG_XEN
+ /*
+ * Xen needs to be able to make sure it can get an exclusive
+ * RO mapping of pages it wants to turn into a pagetable. If
+ * a newly allocated page is also still being vmap()ed by xfs,
+ * it will cause pagetable construction to fail. This is a
+ * quick workaround to always eagerly unmap pages so that Xen
+ * is happy.
+ */
+ vunmap(addr);
+ return;
+#endif
+
+ aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+ if (likely(aentry)) {
+ spin_lock(&as_lock);
+ aentry->next = as_free_head;
+ aentry->vm_addr = addr;
+ as_free_head = aentry;
+ as_list_len++;
+ spin_unlock(&as_lock);
+ } else {
+ vunmap(addr);
+ }
+}
+
+STATIC void
+purge_addresses(void)
+{
+ a_list_t *aentry, *old;
+
+ if (as_free_head == NULL)
+ return;
+
+ spin_lock(&as_lock);
+ aentry = as_free_head;
+ as_free_head = NULL;
+ as_list_len = 0;
+ spin_unlock(&as_lock);
+
+ while ((old = aentry) != NULL) {
+ vunmap(aentry->vm_addr);
+ aentry = aentry->next;
+ kfree(old);
+ }
+}
+
+/*
* Internal xfs_buf_t object manipulation
*/
@@ -264,7 +333,7 @@ xfs_buf_free(
uint i;
if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
- vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+ free_address(bp->b_addr - bp->b_offset);
for (i = 0; i < bp->b_page_count; i++) {
struct page *page = bp->b_pages[i];
@@ -386,8 +455,10 @@ _xfs_buf_map_pages(
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->b_flags |= XBF_MAPPED;
} else if (flags & XBF_MAPPED) {
- bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
- -1, PAGE_KERNEL);
+ if (as_list_len > 64)
+ purge_addresses();
+ bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+ VM_MAP, PAGE_KERNEL);
if (unlikely(bp->b_addr == NULL))
return -ENOMEM;
bp->b_addr += bp->b_offset;
@@ -1672,6 +1743,8 @@ xfsbufd(
count++;
}
+ if (as_list_len > 0)
+ purge_addresses();
if (count)
blk_run_address_space(target->bt_mapping);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 2ed035354c2..a608e72fa40 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -371,7 +371,11 @@ xfs_quiesce_attr(
/* flush inodes and push all remaining buffers out to disk */
xfs_quiesce_fs(mp);
- ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
+ /*
+ * Just warn here until the VFS can correctly support
+ * read-only remount without racing.
+ */
+ WARN_ON(atomic_read(&mp->m_active_trans) != 0);
/* Push the superblock and write an unmount record */
error = xfs_log_sbcount(mp, 1);
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index b4c1ee71349..f8278cfcc1d 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -55,17 +55,11 @@ xfs_swapext(
struct file *file, *target_file;
int error = 0;
- sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL);
- if (!sxp) {
- error = XFS_ERROR(ENOMEM);
- goto out;
- }
-
/* Pull information for the target fd */
file = fget((int)sxp->sx_fdtarget);
if (!file) {
error = XFS_ERROR(EINVAL);
- goto out_free_sxp;
+ goto out;
}
if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) {
@@ -109,8 +103,6 @@ xfs_swapext(
fput(target_file);
out_put_file:
fput(file);
- out_free_sxp:
- kmem_free(sxp);
out:
return error;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 35cca98bd94..b1047de2fff 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -70,16 +70,21 @@ STATIC void xlog_recover_check_summary(xlog_t *);
xfs_buf_t *
xlog_get_bp(
xlog_t *log,
- int num_bblks)
+ int nbblks)
{
- ASSERT(num_bblks > 0);
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_get_bp(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return NULL;
+ }
if (log->l_sectbb_log) {
- if (num_bblks > 1)
- num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
- num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
+ if (nbblks > 1)
+ nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+ nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
}
- return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
+ return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
void
@@ -102,6 +107,13 @@ xlog_bread(
{
int error;
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_bread(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return EFSCORRUPTED;
+ }
+
if (log->l_sectbb_log) {
blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
@@ -139,6 +151,13 @@ xlog_bwrite(
{
int error;
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_bwrite(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return EFSCORRUPTED;
+ }
+
if (log->l_sectbb_log) {
blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
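The same nbblks validity check is now duplicated in xlog_get_bp(), xlog_bread() and xlog_bwrite(). A hypothetical consolidation, not part of this patch, showing how the three copies could share one helper:

/* Returns non-zero when nbblks designates a usable buffer length. */
STATIC int
xlog_buf_bbcount_valid(
	xlog_t	*log,
	int	nbblks)
{
	if (nbblks <= 0 || nbblks > log->l_logBBsize) {
		xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
			  nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return 0;
	}
	return 1;
}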