Diffstat (limited to 'fs/ocfs2')
54 files changed, 3072 insertions, 1597 deletions
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 7d3be845a61..9fb8132f19b 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -16,6 +16,7 @@ ocfs2-objs := \ file.o \ heartbeat.o \ inode.o \ + ioctl.o \ journal.o \ localalloc.o \ mmap.o \ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index edaab05a93e..f43bc5f18a3 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1717,17 +1717,29 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, ocfs2_remove_from_cache(inode, eb_bh); - BUG_ON(eb->h_suballoc_slot); BUG_ON(el->l_recs[0].e_clusters); BUG_ON(el->l_recs[0].e_cpos); BUG_ON(el->l_recs[0].e_blkno); - status = ocfs2_free_extent_block(handle, - tc->tc_ext_alloc_inode, - tc->tc_ext_alloc_bh, - eb); - if (status < 0) { - mlog_errno(status); - goto bail; + if (eb->h_suballoc_slot == 0) { + /* + * This code only understands how to + * lock the suballocator in slot 0, + * which is fine because allocation is + * only ever done out of that + * suballocator too. A future version + * might change that however, so avoid + * a free if we don't know how to + * handle it. This way an fs incompat + * bit will not be necessary. + */ + status = ocfs2_free_extent_block(handle, + tc->tc_ext_alloc_inode, + tc->tc_ext_alloc_bh, + eb); + if (status < 0) { + mlog_errno(status); + goto bail; + } } } brelse(eb_bh); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 47152bf9a7f..3d7c082a8f5 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -391,31 +391,28 @@ out: static int ocfs2_commit_write(struct file *file, struct page *page, unsigned from, unsigned to) { - int ret, extending = 0, locklevel = 0; - loff_t new_i_size; + int ret; struct buffer_head *di_bh = NULL; struct inode *inode = page->mapping->host; struct ocfs2_journal_handle *handle = NULL; + struct ocfs2_dinode *di; mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to); /* NOTE: ocfs2_file_aio_write has ensured that it's safe for - * us to sample inode->i_size here without the metadata lock: + * us to continue here without rechecking the I/O against + * changed inode values. * * 1) We're currently holding the inode alloc lock, so no * nodes can change it underneath us. * * 2) We've had to take the metadata lock at least once - * already to check for extending writes, hence insuring - * that our current copy is also up to date. + * already to check for extending writes, suid removal, etc. + * The meta data update code then ensures that we don't get a + * stale inode allocation image (i_size, i_clusters, etc). */ - new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; - if (new_i_size > i_size_read(inode)) { - extending = 1; - locklevel = 1; - } - ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, locklevel, page); + ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page); if (ret != 0) { mlog_errno(ret); goto out; @@ -427,23 +424,20 @@ static int ocfs2_commit_write(struct file *file, struct page *page, goto out_unlock_meta; } - if (extending) { - handle = ocfs2_start_walk_page_trans(inode, page, from, to); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - goto out_unlock_data; - } + handle = ocfs2_start_walk_page_trans(inode, page, from, to); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out_unlock_data; + } - /* Mark our buffer early. We'd rather catch this error up here - * as opposed to after a successful commit_write which would - * require us to set back inode->i_size. 
*/ - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out_commit; - } + /* Mark our buffer early. We'd rather catch this error up here + * as opposed to after a successful commit_write which would + * require us to set back inode->i_size. */ + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; } /* might update i_size */ @@ -453,37 +447,28 @@ static int ocfs2_commit_write(struct file *file, struct page *page, goto out_commit; } - if (extending) { - loff_t size = (u64) i_size_read(inode); - struct ocfs2_dinode *di = - (struct ocfs2_dinode *)di_bh->b_data; + di = (struct ocfs2_dinode *)di_bh->b_data; - /* ocfs2_mark_inode_dirty is too heavy to use here. */ - inode->i_blocks = ocfs2_align_bytes_to_sectors(size); - inode->i_ctime = inode->i_mtime = CURRENT_TIME; + /* ocfs2_mark_inode_dirty() is too heavy to use here. */ + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); + di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); - di->i_size = cpu_to_le64(size); - di->i_ctime = di->i_mtime = - cpu_to_le64(inode->i_mtime.tv_sec); - di->i_ctime_nsec = di->i_mtime_nsec = - cpu_to_le32(inode->i_mtime.tv_nsec); + inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode))); + di->i_size = cpu_to_le64((u64)i_size_read(inode)); - ret = ocfs2_journal_dirty(handle, di_bh); - if (ret < 0) { - mlog_errno(ret); - goto out_commit; - } + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; } - BUG_ON(extending && (i_size_read(inode) != new_i_size)); - out_commit: - if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(handle); out_unlock_data: ocfs2_data_unlock(inode, 1); out_unlock_meta: - ocfs2_meta_unlock(inode, locklevel); + ocfs2_meta_unlock(inode, 1); out: if (di_bh) brelse(di_bh); @@ -558,16 +543,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, u64 vbo_max; /* file offset, max_blocks from iblock */ u64 p_blkno; int contig_blocks; - unsigned char blocksize_bits; + unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; - if (!inode || !bh_result) { - mlog(ML_ERROR, "inode or bh_result is null\n"); - return -EIO; - } - - blocksize_bits = inode->i_sb->s_blocksize_bits; - /* This function won't even be called if the request isn't all * nicely aligned and of the right size, so there's no need * for us to check any of that. 
*/ @@ -666,7 +644,7 @@ out: return ret; } -struct address_space_operations ocfs2_aops = { +const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .writepage = ocfs2_writepage, .prepare_write = ocfs2_prepare_write, diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 9a24adf9be6..c9037414f4f 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -100,6 +100,9 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr, mlog_entry("(block=(%llu), nr=(%d), flags=%d, inode=%p)\n", (unsigned long long)block, nr, flags, inode); + BUG_ON((flags & OCFS2_BH_READAHEAD) && + (!inode || !(flags & OCFS2_BH_CACHED))); + if (osb == NULL || osb->sb == NULL || bhs == NULL) { status = -EINVAL; mlog_errno(status); @@ -140,6 +143,30 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr, bh = bhs[i]; ignore_cache = 0; + /* There are three read-ahead cases here which we need to + * be concerned with. All three assume a buffer has + * previously been submitted with OCFS2_BH_READAHEAD + * and it hasn't yet completed I/O. + * + * 1) The current request is sync to disk. This rarely + * happens these days, and never when performance + * matters - the code can just wait on the buffer + * lock and re-submit. + * + * 2) The current request is cached, but not + * readahead. ocfs2_buffer_uptodate() will return + * false anyway, so we'll wind up waiting on the + * buffer lock to do I/O. We re-check the request + * after getting the lock to avoid a re-submit. + * + * 3) The current request is readahead (and so must + * also be a caching one). We short circuit if the + * buffer is locked (under I/O) and if it's in the + * uptodate cache. The re-check from #2 catches the + * case that the previous read-ahead completes just + * before our is-it-in-flight check. + */ + if (flags & OCFS2_BH_CACHED && !ocfs2_buffer_uptodate(inode, bh)) { mlog(ML_UPTODATE, @@ -169,6 +196,14 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr, continue; } + /* A read-ahead request was made - if the + * buffer is already under read-ahead from a + * previously submitted request then we are + * done here. */ + if ((flags & OCFS2_BH_READAHEAD) + && ocfs2_buffer_read_ahead(inode, bh)) + continue; + lock_buffer(bh); if (buffer_jbd(bh)) { #ifdef CATCH_BH_JBD_RACES @@ -181,13 +216,22 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr, continue; #endif } + + /* Re-check ocfs2_buffer_uptodate() as a + * previously read-ahead buffer may have + * completed I/O while we were waiting for the + * buffer lock. */ + if ((flags & OCFS2_BH_CACHED) + && !(flags & OCFS2_BH_READAHEAD) + && ocfs2_buffer_uptodate(inode, bh)) { + unlock_buffer(bh); + continue; + } + clear_buffer_uptodate(bh); get_bh(bh); /* for end_buffer_read_sync() */ bh->b_end_io = end_buffer_read_sync; - if (flags & OCFS2_BH_READAHEAD) - submit_bh(READA, bh); - else - submit_bh(READ, bh); + submit_bh(READ, bh); continue; } } @@ -197,34 +241,39 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr, for (i = (nr - 1); i >= 0; i--) { bh = bhs[i]; - /* We know this can't have changed as we hold the - * inode sem. Avoid doing any work on the bh if the - * journal has it. */ - if (!buffer_jbd(bh)) - wait_on_buffer(bh); - - if (!buffer_uptodate(bh)) { - /* Status won't be cleared from here on out, - * so we can safely record this and loop back - * to cleanup the other buffers. 
Don't need to - * remove the clustered uptodate information - * for this bh as it's not marked locally - * uptodate. */ - status = -EIO; - brelse(bh); - bhs[i] = NULL; - continue; + if (!(flags & OCFS2_BH_READAHEAD)) { + /* We know this can't have changed as we hold the + * inode sem. Avoid doing any work on the bh if the + * journal has it. */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + + if (!buffer_uptodate(bh)) { + /* Status won't be cleared from here on out, + * so we can safely record this and loop back + * to cleanup the other buffers. Don't need to + * remove the clustered uptodate information + * for this bh as it's not marked locally + * uptodate. */ + status = -EIO; + brelse(bh); + bhs[i] = NULL; + continue; + } } + /* Always set the buffer in the cache, even if it was + * a forced read, or read-ahead which hasn't yet + * completed. */ if (inode) ocfs2_set_buffer_uptodate(inode, bh); } if (inode) mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, - (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes"); + (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes", flags); bail: diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h index 6ecb90937b6..6cc20930fac 100644 --- a/fs/ocfs2/buffer_head_io.h +++ b/fs/ocfs2/buffer_head_io.h @@ -49,7 +49,7 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, #define OCFS2_BH_CACHED 1 -#define OCFS2_BH_READAHEAD 8 /* use this to pass READA down to submit_bh */ +#define OCFS2_BH_READAHEAD 8 static inline int ocfs2_read_block(struct ocfs2_super * osb, u64 off, struct buffer_head **bh, int flags, diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 21f38accd03..305cba3681f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem); * multiple hb threads are watching multiple regions. A node is live * whenever any of the threads sees activity from the node in its region. */ -static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(o2hb_live_lock); static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; static LIST_HEAD(o2hb_node_events); @@ -320,8 +320,12 @@ static int compute_max_sectors(struct block_device *bdev) max_pages = q->max_hw_segments; max_pages--; /* Handle I/Os that straddle a page */ - max_sectors = max_pages << (PAGE_SHIFT - 9); - + if (max_pages) { + max_sectors = max_pages << (PAGE_SHIFT - 9); + } else { + /* The BIO contains one page or less. */ + max_sectors = q->max_sectors; + } /* Why is fls() 1-based???? */ pow_two_sectors = 1 << (fls(max_sectors) - 1); @@ -517,6 +521,7 @@ static inline void o2hb_prepare_block(struct o2hb_region *reg, hb_block->hb_seq = cpu_to_le64(cputime); hb_block->hb_node = node_num; hb_block->hb_generation = cpu_to_le64(generation); + hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS); /* This step must always happen last! 
*/ hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg, @@ -645,6 +650,8 @@ static int o2hb_check_slot(struct o2hb_region *reg, struct o2nm_node *node; struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block; u64 cputime; + unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS; + unsigned int slot_dead_ms; memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); @@ -733,6 +740,23 @@ fire_callbacks: &o2hb_live_slots[slot->ds_node_num]); slot->ds_equal_samples = 0; + + /* We want to be sure that all nodes agree on the + * number of milliseconds before a node will be + * considered dead. The self-fencing timeout is + * computed from this value, and a discrepancy might + * result in heartbeat calling a node dead when it + * hasn't self-fenced yet. */ + slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms); + if (slot_dead_ms && slot_dead_ms != dead_ms) { + /* TODO: Perhaps we can fail the region here. */ + mlog(ML_ERROR, "Node %d on device %s has a dead count " + "of %u ms, but our count is %u ms.\n" + "Please double check your configuration values " + "for 'O2CB_HEARTBEAT_THRESHOLD'\n", + slot->ds_node_num, reg->hr_dev_name, slot_dead_ms, + dead_ms); + } goto out; } diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 73edad78253..a42628ba9dd 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -123,6 +123,17 @@ #define MLOG_MASK_PREFIX 0 #endif +/* + * When logging is disabled, force the bit test to 0 for anything other + * than errors and notices, allowing gcc to remove the code completely. + * When enabled, allow all masks. + */ +#if defined(CONFIG_OCFS2_DEBUG_MASKLOG) +#define ML_ALLOWED_BITS ~0 +#else +#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE) +#endif + #define MLOG_MAX_BITS 64 struct mlog_bits { @@ -187,7 +198,8 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; #define mlog(mask, fmt, args...) do { \ u64 __m = MLOG_MASK_PREFIX | (mask); \ - if (__mlog_test_u64(__m, mlog_and_bits) && \ + if ((__m & ML_ALLOWED_BITS) && \ + __mlog_test_u64(__m, mlog_and_bits) && \ !__mlog_test_u64(__m, mlog_not_bits)) { \ if (__m & ML_ERROR) \ __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \ @@ -204,6 +216,7 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ } while (0) +#if defined(CONFIG_OCFS2_DEBUG_MASKLOG) #define mlog_entry(fmt, args...) do { \ mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \ } while (0) @@ -247,6 +260,13 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; #define mlog_exit_void() do { \ mlog(ML_EXIT, "EXIT\n"); \ } while (0) +#else +#define mlog_entry(...) do { } while (0) +#define mlog_entry_void(...) do { } while (0) +#define mlog_exit(...) do { } while (0) +#define mlog_exit_ptr(...) do { } while (0) +#define mlog_exit_void(...) do { } while (0) +#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */ #define mlog_bug_on_msg(cond, fmt, args...) 
do { \ if (cond) { \ diff --git a/fs/ocfs2/cluster/ocfs2_heartbeat.h b/fs/ocfs2/cluster/ocfs2_heartbeat.h index 94096069cb4..3f4151da970 100644 --- a/fs/ocfs2/cluster/ocfs2_heartbeat.h +++ b/fs/ocfs2/cluster/ocfs2_heartbeat.h @@ -32,6 +32,7 @@ struct o2hb_disk_heartbeat_block { __u8 hb_pad1[3]; __le32 hb_cksum; __le64 hb_generation; + __le32 hb_dead_ms; }; #endif /* _OCFS2_HEARTBEAT_H */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 0f60cc0d398..b650efa8c8b 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -108,7 +108,7 @@ ##args); \ } while (0) -static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED; +static DEFINE_RWLOCK(o2net_handler_lock); static struct rb_root o2net_handler_tree = RB_ROOT; static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; @@ -396,8 +396,8 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - mlog(ML_NOTICE, "no longer connected to " SC_NODEF_FMT "\n", - SC_NODEF_ARGS(old_sc)); + printk(KERN_INFO "o2net: no longer connected to " + SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -409,10 +409,10 @@ static void o2net_set_nn_state(struct o2net_node *nn, * the only way to start connecting again is to down * heartbeat and bring it back up. */ cancel_delayed_work(&nn->nn_connect_expired); - mlog(ML_NOTICE, "%s " SC_NODEF_FMT "\n", - o2nm_this_node() > sc->sc_node->nd_num ? - "connected to" : "accepted connection from", - SC_NODEF_ARGS(sc)); + printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", + o2nm_this_node() > sc->sc_node->nd_num ? + "connected to" : "accepted connection from", + SC_NODEF_ARGS(sc)); } /* trigger the connecting worker func as long as we're not valid, @@ -1280,7 +1280,7 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - mlog(ML_NOTICE, "connection to " SC_NODEF_FMT " has been idle for 10 " + printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for 10 " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc)); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index ff9e2e2104c..4b46aac7d24 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -44,11 +44,17 @@ * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. * + * New in version 4: + * - Remove i_generation from lock names for better stat performance. 
+ * + * New in version 3: + * - Replace dentry votes with a cluster lock + * * New in version 2: * - full 64 bit i_size in the metadata lock lvbs * - introduction of "rw" lock and pushing meta/data locking down */ -#define O2NET_PROTOCOL_VERSION 2ULL +#define O2NET_PROTOCOL_VERSION 4ULL struct o2net_handshake { __be64 protocol_version; __be64 connector_id; diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 1a01380e387..014e73978da 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -35,15 +35,17 @@ #include "alloc.h" #include "dcache.h" +#include "dlmglue.h" #include "file.h" #include "inode.h" + static int ocfs2_dentry_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; int ret = 0; /* if all else fails, just return false */ - struct ocfs2_super *osb; + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); mlog_entry("(0x%p, '%.*s')\n", dentry, dentry->d_name.len, dentry->d_name.name); @@ -55,28 +57,31 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, goto bail; } - osb = OCFS2_SB(inode->i_sb); - BUG_ON(!osb); - if (inode != osb->root_inode) { - spin_lock(&OCFS2_I(inode)->ip_lock); - /* did we or someone else delete this inode? */ - if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { - spin_unlock(&OCFS2_I(inode)->ip_lock); - mlog(0, "inode (%llu) deleted, returning false\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno); - goto bail; - } + if (inode == osb->root_inode || is_bad_inode(inode)) + goto bail; + + spin_lock(&OCFS2_I(inode)->ip_lock); + /* did we or someone else delete this inode? */ + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { spin_unlock(&OCFS2_I(inode)->ip_lock); + mlog(0, "inode (%llu) deleted, returning false\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + goto bail; + } + spin_unlock(&OCFS2_I(inode)->ip_lock); - if (!inode->i_nlink) { - mlog(0, "Inode %llu orphaned, returning false " - "dir = %d\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, - S_ISDIR(inode->i_mode)); - goto bail; - } + /* + * We don't need a cluster lock to test this because once an + * inode nlink hits zero, it never goes back. + */ + if (inode->i_nlink == 0) { + mlog(0, "Inode %llu orphaned, returning false " + "dir = %d\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + S_ISDIR(inode->i_mode)); + goto bail; } ret = 1; @@ -87,6 +92,322 @@ bail: return ret; } +static int ocfs2_match_dentry(struct dentry *dentry, + u64 parent_blkno, + int skip_unhashed) +{ + struct inode *parent; + + /* + * ocfs2_lookup() does a d_splice_alias() _before_ attaching + * to the lock data, so we skip those here, otherwise + * ocfs2_dentry_attach_lock() will get its original dentry + * back. + */ + if (!dentry->d_fsdata) + return 0; + + if (!dentry->d_parent) + return 0; + + if (skip_unhashed && d_unhashed(dentry)) + return 0; + + parent = dentry->d_parent->d_inode; + /* Negative parent dentry? */ + if (!parent) + return 0; + + /* Name is in a different directory. */ + if (OCFS2_I(parent)->ip_blkno != parent_blkno) + return 0; + + return 1; +} + +/* + * Walk the inode alias list, and find a dentry which has a given + * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it + * is looking for a dentry_lock reference. The vote thread is looking + * to unhash aliases, so we allow it to skip any that already have + * that property. 
*/ +struct dentry *ocfs2_find_local_alias(struct inode *inode, + u64 parent_blkno, + int skip_unhashed) +{ + struct list_head *p; + struct dentry *dentry = NULL; + + spin_lock(&dcache_lock); + + list_for_each(p, &inode->i_dentry) { + dentry = list_entry(p, struct dentry, d_alias); + + if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { + mlog(0, "dentry found: %.*s\n", + dentry->d_name.len, dentry->d_name.name); + + dget_locked(dentry); + break; + } + + dentry = NULL; + } + + spin_unlock(&dcache_lock); + + return dentry; +} + +DEFINE_SPINLOCK(dentry_attach_lock); + +/* + * Attach this dentry to a cluster lock. + * + * Dentry locks cover all links in a given directory to a particular + * inode. We do this so that ocfs2 can build a lock name which all + * nodes in the cluster can agree on at all times. Shoving full names + * in the cluster lock won't work due to size restrictions. Covering + * links inside of a directory is a good compromise because it still + * allows us to use the parent directory lock to synchronize + * operations. + * + * Call this function with the parent dir semaphore and the parent dir + * cluster lock held. + * + * The dir semaphore will protect us from having to worry about + * concurrent processes on our node trying to attach a lock at the + * same time. + * + * The dir cluster lock (held at either PR or EX mode) protects us + * from unlink and rename on other nodes. + * + * A dput() can happen asynchronously due to pruning, so we cover + * attaching and detaching the dentry lock with a + * dentry_attach_lock. + * + * A node which has done lookup on a name retains a protected read + * lock until final dput. If the user requests an unlink or rename, + * the protected read is upgraded to an exclusive lock. Other nodes + * who have seen the dentry will then be informed that they need to + * downgrade their lock, which will involve d_delete on the + * dentry. This happens in ocfs2_dentry_convert_worker(). + */ +int ocfs2_dentry_attach_lock(struct dentry *dentry, + struct inode *inode, + u64 parent_blkno) +{ + int ret; + struct dentry *alias; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + + mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, dl); + + /* + * Negative dentry. We ignore these for now. + * + * XXX: Can we improve ocfs2_dentry_revalidate() by + * tracking these? + */ + if (!inode) + return 0; + + if (dl) { + mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno, + " \"%.*s\": old parent: %llu, new: %llu\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, + (unsigned long long)dl->dl_parent_blkno); + return 0; + } + + alias = ocfs2_find_local_alias(inode, parent_blkno, 0); + if (alias) { + /* + * Great, an alias exists, which means we must have a + * dentry lock already. We can just grab the lock off + * the alias and add it to the list. + * + * We're depending here on the fact that this dentry + * was found and exists in the dcache and so must have + * a reference to the dentry_lock because we can't + * race creates. Final dput() cannot happen on it + * since we have it pinned, so our reference is safe. 
*/ + dl = alias->d_fsdata; + mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n", + (unsigned long long)parent_blkno, + (unsigned long long)OCFS2_I(inode)->ip_blkno); + + mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno, + " \"%.*s\": old parent: %llu, new: %llu\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, + (unsigned long long)dl->dl_parent_blkno); + + mlog(0, "Found: %s\n", dl->dl_lockres.l_name); + + goto out_attach; + } + + /* + * There are no other aliases + */ + dl = kmalloc(sizeof(*dl), GFP_NOFS); + if (!dl) { + ret = -ENOMEM; + mlog_errno(ret); + return ret; + } + + dl->dl_count = 0; + /* + * Does this have to happen below, for all attaches, in case + * the struct inode gets blown away by votes? + */ + dl->dl_inode = igrab(inode); + dl->dl_parent_blkno = parent_blkno; + ocfs2_dentry_lock_res_init(dl, parent_blkno, inode); + +out_attach: + spin_lock(&dentry_attach_lock); + dentry->d_fsdata = dl; + dl->dl_count++; + spin_unlock(&dentry_attach_lock); + + /* + * This actually gets us our PRMODE level lock. From now on, + * we'll have a notification if one of these names is + * destroyed on another node. + */ + ret = ocfs2_dentry_lock(dentry, 0); + if (!ret) + ocfs2_dentry_unlock(dentry, 0); + else + mlog_errno(ret); + + dput(alias); + + return ret; +} + +/* + * ocfs2_dentry_iput() and friends. + * + * At this point, our particular dentry is detached from the inode's + * alias list, so there's no way that the locking code can find it. + * + * The interesting stuff happens when we determine that our lock needs + * to go away because this is the last subdir alias in the + * system. This function needs to handle a couple things: + * + * 1) Synchronizing lock shutdown with the downconvert threads. This + * is already handled for us via the lockres release drop function + * called in ocfs2_release_dentry_lock() + * + * 2) A race may occur when we're doing our lock shutdown and + * another process wants to create a new dentry lock. Right now we + * let them race, which means that for a very short while, this + * node might have two locks on a lock resource. This shouldn't be a + * problem though because one of them is in the process of being + * thrown out. + */ +static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl) +{ + ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); + ocfs2_lock_res_free(&dl->dl_lockres); + iput(dl->dl_inode); + kfree(dl); +} + +void ocfs2_dentry_lock_put(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl) +{ + int unlock = 0; + + BUG_ON(dl->dl_count == 0); + + spin_lock(&dentry_attach_lock); + dl->dl_count--; + unlock = !dl->dl_count; + spin_unlock(&dentry_attach_lock); + + if (unlock) + ocfs2_drop_dentry_lock(osb, dl); +} + +static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode) +{ + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + + mlog_bug_on_msg(!dl && !(dentry->d_flags & DCACHE_DISCONNECTED), + "dentry: %.*s\n", dentry->d_name.len, + dentry->d_name.name); + + if (!dl) + goto out; + + mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n", + dentry->d_name.len, dentry->d_name.name, + dl->dl_count); + + ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl); + +out: + iput(inode); +} + +/* + * d_move(), but keep the locks in sync. + * + * When we are done, "dentry" will have the parent dir and name of + * "target", which will be thrown away. + * + * We manually update the lock of "dentry" if need be. 
+ * + * "target" doesn't have its dentry lock touched - we allow the later + * dput() to handle this for us. + * + * This is called during ocfs2_rename(), while holding parent + * directory locks. The dentries have already been deleted on other + * nodes via ocfs2_remote_dentry_delete(). + * + * Normally, the VFS handles the d_move() for the file system, after + * the ->rename() callback. OCFS2 wants to handle this internally, so + * the new lock can be created atomically with respect to the cluster. + */ +void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target, + struct inode *old_dir, struct inode *new_dir) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb); + struct inode *inode = dentry->d_inode; + + /* + * Move within the same directory, so the actual lock info won't + * change. + * + * XXX: Is there any advantage to dropping the lock here? + */ + if (old_dir == new_dir) + goto out_move; + + ocfs2_dentry_lock_put(osb, dentry->d_fsdata); + + dentry->d_fsdata = NULL; + ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno); + if (ret) + mlog_errno(ret); + +out_move: + d_move(dentry, target); +} + struct dentry_operations ocfs2_dentry_ops = { .d_revalidate = ocfs2_dentry_revalidate, + .d_iput = ocfs2_dentry_iput, }; diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index 90072771114..c091c34d988 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h @@ -28,4 +28,31 @@ extern struct dentry_operations ocfs2_dentry_ops; +struct ocfs2_dentry_lock { + unsigned int dl_count; + u64 dl_parent_blkno; + + /* + * The ocfs2_dentry_lock keeps an inode reference until + * dl_lockres has been destroyed. This is usually done in + * ->d_iput() anyway, so there should be minimal impact. + */ + struct inode *dl_inode; + struct ocfs2_lock_res dl_lockres; +}; + +int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, + u64 parent_blkno); + +void ocfs2_dentry_lock_put(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl); + +struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, + int skip_unhashed); + +void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target, + struct inode *old_dir, struct inode *new_dir); + +extern spinlock_t dentry_attach_lock; + #endif /* OCFS2_DCACHE_H */ diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index ae47f450792..04e01915b86 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -74,14 +74,14 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - unsigned long offset, blk; - int i, num, stored; + unsigned long offset, blk, last_ra_blk = 0; + int i, stored; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; int err; struct inode *inode = filp->f_dentry->d_inode; struct super_block * sb = inode->i_sb; - int have_disk_lock = 0; + unsigned int ra_sectors = 16; mlog_entry("dirino=%llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -95,9 +95,8 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) mlog_errno(error); /* we haven't got any yet, so propagate the error. 
*/ stored = error; - goto bail; + goto bail_nolock; } - have_disk_lock = 1; offset = filp->f_pos & (sb->s_blocksize - 1); @@ -113,16 +112,21 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) continue; } - /* - * Do the readahead (8k) - */ - if (!offset) { - for (i = 16 >> (sb->s_blocksize_bits - 9), num = 0; + /* The idea here is to begin with 8k read-ahead and to stay + * 4k ahead of our current position. + * + * TODO: Use the pagecache for this. We just need to + * make sure it's cluster-safe... */ + if (!last_ra_blk + || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) { + for (i = ra_sectors >> (sb->s_blocksize_bits - 9); i > 0; i--) { tmp = ocfs2_bread(inode, ++blk, &err, 1); if (tmp) brelse(tmp); } + last_ra_blk = blk; + ra_sectors = 8; } revalidate: @@ -194,9 +198,9 @@ revalidate: stored = 0; bail: - if (have_disk_lock) - ocfs2_meta_unlock(inode, 0); + ocfs2_meta_unlock(inode, 0); +bail_nolock: mlog_exit(stored); return stored; @@ -213,11 +217,9 @@ int ocfs2_find_files_on_disk(const char *name, struct ocfs2_dir_entry **dirent) { int status = -ENOENT; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - mlog_entry("(osb=%p, parent=%llu, name='%.*s', blkno=%p, inode=%p)\n", - osb, (unsigned long long)OCFS2_I(inode)->ip_blkno, - namelen, name, blkno, inode); + mlog_entry("(name=%.*s, blkno=%p, inode=%p, dirent_bh=%p, dirent=%p)\n", + namelen, name, blkno, inode, dirent_bh, dirent); *dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent); if (!*dirent_bh || !*dirent) { diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index 53652f51c0e..cfd5cb65cab 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -182,6 +182,7 @@ enum dlm_status dlmlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, int flags, const char *name, + int namelen, dlm_astlockfunc_t *ast, void *data, dlm_bastlockfunc_t *bast); diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 355593dd8ef..681046d5139 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -197,12 +197,14 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, lock->ml.node == dlm->node_num ? "master" : "remote"); memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); - } else if (lksb->flags & DLM_LKSB_PUT_LVB) { - mlog(0, "setting lvb from lockres for %s node\n", - lock->ml.node == dlm->node_num ? "master" : - "remote"); - memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); } + /* Do nothing for lvb put requests - they should be done in + * place when the lock is downconverted - otherwise we risk + * racing gets and puts which could result in old lvb data + * being propagated. We leave the put flag set and clear it + * here. In the future we might want to clear it at the time + * the put is actually done. + */ spin_unlock(&res->spinlock); } @@ -318,8 +320,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) res = dlm_lookup_lockres(dlm, name, locklen); if (!res) { - mlog(ML_ERROR, "got %sast for unknown lockres! " - "cookie=%u:%llu, name=%.*s, namelen=%u\n", + mlog(0, "got %sast for unknown lockres! " + "cookie=%u:%llu, name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), @@ -365,12 +367,10 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) goto do_ast; } - mlog(ML_ERROR, "got %sast for unknown lock! cookie=%u:%llu, " - "name=%.*s, namelen=%u\n", - past->type == DLM_AST ? 
"" : "b", - dlm_get_lock_cookie_node(cookie), - dlm_get_lock_cookie_seq(cookie), - locklen, name, locklen); + mlog(0, "got %sast for unknown lock! cookie=%u:%llu, " + "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", + dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), + locklen, name, locklen); ret = DLM_NORMAL; unlock_out: @@ -381,8 +381,7 @@ do_ast: ret = DLM_NORMAL; if (past->type == DLM_AST) { /* do not alter lock refcount. switching lists. */ - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->granted); + list_move_tail(&lock->list, &res->granted); mlog(0, "ast: adding to granted list... type=%d, " "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); if (lock->ml.convert_type != LKM_IVMODE) { @@ -463,7 +462,7 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, mlog(ML_ERROR, "sent AST to node %u, it returned " "DLM_MIGRATING!\n", lock->ml.node); BUG(); - } else if (status != DLM_NORMAL) { + } else if (status != DLM_NORMAL && status != DLM_IVLOCKID) { mlog(ML_ERROR, "AST to node %u returned %d!\n", lock->ml.node, status); /* ignore it */ diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 88cc43df18f..fa968180b07 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -37,7 +37,17 @@ #define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes #define DLM_THREAD_MS 200 // flush at least every 200 ms -#define DLM_HASH_BUCKETS (PAGE_SIZE / sizeof(struct hlist_head)) +#define DLM_HASH_SIZE_DEFAULT (1 << 14) +#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE +# define DLM_HASH_PAGES 1 +#else +# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE) +#endif +#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head)) +#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE) + +/* Intended to make it easier for us to switch out hash functions */ +#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l) enum dlm_ast_type { DLM_AST = 0, @@ -61,7 +71,8 @@ static inline int dlm_is_recovery_lock(const char *lock_name, int name_len) return 0; } -#define DLM_RECO_STATE_ACTIVE 0x0001 +#define DLM_RECO_STATE_ACTIVE 0x0001 +#define DLM_RECO_STATE_FINALIZE 0x0002 struct dlm_recovery_ctxt { @@ -85,7 +96,7 @@ enum dlm_ctxt_state { struct dlm_ctxt { struct list_head list; - struct hlist_head *lockres_hash; + struct hlist_head **lockres_hash; struct list_head dirty_list; struct list_head purge_list; struct list_head pending_asts; @@ -120,6 +131,7 @@ struct dlm_ctxt struct o2hb_callback_func dlm_hb_down; struct task_struct *dlm_thread_task; struct task_struct *dlm_reco_thread_task; + struct workqueue_struct *dlm_worker; wait_queue_head_t dlm_thread_wq; wait_queue_head_t dlm_reco_thread_wq; wait_queue_head_t ast_wq; @@ -132,6 +144,11 @@ struct dlm_ctxt struct list_head dlm_eviction_callbacks; }; +static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) +{ + return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE); +} + /* these keventd work queue items are for less-frequently * called functions that cannot be directly called from the * net message handlers for some reason, usually because @@ -216,20 +233,29 @@ struct dlm_lock_resource /* WARNING: Please see the comment in dlm_init_lockres before * adding fields here. 
*/ struct hlist_node hash_node; + struct qstr lockname; struct kref refs; - /* please keep these next 3 in this order - * some funcs want to iterate over all lists */ + /* + * Please keep granted, converting, and blocked in this order, + * as some funcs want to iterate over all lists. + * + * All four lists are protected by the hash's reference. + */ struct list_head granted; struct list_head converting; struct list_head blocked; + struct list_head purge; + /* + * These two lists require you to hold an additional reference + * while they are on the list. + */ struct list_head dirty; struct list_head recovering; // dlm_recovery_ctxt.resources list /* unused lock resources have their last_used stamped and are * put on a list for the dlm thread to run. */ - struct list_head purge; unsigned long last_used; unsigned migration_pending:1; @@ -238,7 +264,6 @@ struct dlm_lock_resource wait_queue_head_t wq; u8 owner; //node which owns the lock resource, or unknown u16 state; - struct qstr lockname; char lvb[DLM_LVB_LEN]; }; @@ -300,6 +325,15 @@ enum dlm_lockres_list { DLM_BLOCKED_LIST }; +static inline int dlm_lvb_is_empty(char *lvb) +{ + int i; + for (i=0; i<DLM_LVB_LEN; i++) + if (lvb[i]) + return 0; + return 1; +} + static inline struct list_head * dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) { @@ -609,7 +643,8 @@ struct dlm_finalize_reco { u8 node_idx; u8 dead_node; - __be16 pad1; + u8 flags; + u8 pad1; __be32 pad2; }; @@ -676,6 +711,7 @@ void dlm_wait_for_recovery(struct dlm_ctxt *dlm); void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); +int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); void dlm_put(struct dlm_ctxt *dlm); struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); @@ -687,14 +723,20 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres); -void dlm_lockres_get(struct dlm_lock_resource *res); +static inline void dlm_lockres_get(struct dlm_lock_resource *res) +{ + /* This is called on every lookup, so it might be worth + * inlining. 
*/ + kref_get(&res->refs); +} void dlm_lockres_put(struct dlm_lock_resource *res); void __dlm_unhash_lockres(struct dlm_lock_resource *res); void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, const char *name, - unsigned int len); + unsigned int len, + unsigned int hash); struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, const char *name, unsigned int len); @@ -705,6 +747,7 @@ void dlm_change_lockres_owner(struct dlm_ctxt *dlm, u8 owner); struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, const char *lockid, + int namelen, int flags); struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, const char *name, @@ -780,8 +823,6 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data); int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data); int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 nodenum, u8 *real_master); -int dlm_lockres_master_requery(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, u8 *real_master); int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, @@ -819,6 +860,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node); int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); +int __dlm_lockres_unused(struct dlm_lock_resource *res); static inline const char * dlm_lock_mode_name(int mode) { diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 8285228d9e3..c764dc8e40a 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -214,6 +214,9 @@ grant: if (lock->ml.node == dlm->node_num) mlog(0, "doing in-place convert for nonlocal lock\n"); lock->ml.type = type; + if (lock->lksb->flags & DLM_LKSB_PUT_LVB) + memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); + status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; @@ -231,8 +234,7 @@ switch_queues: lock->ml.convert_type = type; /* do not alter lock refcount. switching lists. */ - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->converting); + list_move_tail(&lock->list, &res->converting); unlock_exit: spin_unlock(&lock->spinlock); @@ -248,8 +250,7 @@ void dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* do not alter lock refcount. switching lists. */ - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->granted); + list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } @@ -294,8 +295,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. 
*/ - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->converting); + list_move_tail(&lock->list, &res->converting); lock->convert_pending = 1; lock->ml.convert_type = type; @@ -464,6 +464,12 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) } spin_lock(&res->spinlock); + status = __dlm_lockres_state_to_status(res); + if (status != DLM_NORMAL) { + spin_unlock(&res->spinlock); + dlm_error(status); + goto leave; + } list_for_each(iter, &res->granted) { lock = list_entry(iter, struct dlm_lock, list); if (lock->ml.cookie == cnv->cookie && @@ -473,6 +479,21 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) } lock = NULL; } + if (!lock) { + __dlm_print_one_lock_resource(res); + list_for_each(iter, &res->granted) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock->ml.node == cnv->node_idx) { + mlog(ML_ERROR, "There is something here " + "for node %u, lock->ml.cookie=%llu, " + "cnv->cookie=%llu\n", cnv->node_idx, + (unsigned long long)lock->ml.cookie, + (unsigned long long)cnv->cookie); + break; + } + } + lock = NULL; + } spin_unlock(&res->spinlock); if (!lock) { status = DLM_IVLOCKID; diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index c7eae5d3324..3f6c8d88f7a 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -37,10 +37,8 @@ #include "dlmapi.h" #include "dlmcommon.h" -#include "dlmdebug.h" #include "dlmdomain.h" -#include "dlmdebug.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" @@ -120,6 +118,7 @@ void dlm_print_one_lock(struct dlm_lock *lockid) } EXPORT_SYMBOL_GPL(dlm_print_one_lock); +#if 0 void dlm_dump_lock_resources(struct dlm_ctxt *dlm) { struct dlm_lock_resource *res; @@ -136,12 +135,13 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm) spin_lock(&dlm->spinlock); for (i=0; i<DLM_HASH_BUCKETS; i++) { - bucket = &(dlm->lockres_hash[i]); + bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry(res, iter, bucket, hash_node) dlm_print_one_lock_resource(res); } spin_unlock(&dlm->spinlock); } +#endif /* 0 */ static const char *dlm_errnames[] = { [DLM_NORMAL] = "DLM_NORMAL", diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h deleted file mode 100644 index 6858510c3cc..00000000000 --- a/fs/ocfs2/dlm/dlmdebug.h +++ /dev/null @@ -1,30 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * - * dlmdebug.h - * - * Copyright (C) 2004 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. 
- * - */ - -#ifndef DLMDEBUG_H -#define DLMDEBUG_H - -void dlm_dump_lock_resources(struct dlm_ctxt *dlm); - -#endif diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 8f3a9e3106f..8d1065f8b3b 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -41,7 +41,6 @@ #include "dlmapi.h" #include "dlmcommon.h" -#include "dlmdebug.h" #include "dlmdomain.h" #include "dlmver.h" @@ -49,6 +48,33 @@ #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) #include "cluster/masklog.h" +static void dlm_free_pagevec(void **vec, int pages) +{ + while (pages--) + free_page((unsigned long)vec[pages]); + kfree(vec); +} + +static void **dlm_alloc_pagevec(int pages) +{ + void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL); + int i; + + if (!vec) + return NULL; + + for (i = 0; i < pages; i++) + if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL))) + goto out_free; + + mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n", + pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE); + return vec; +out_free: + dlm_free_pagevec(vec, i); + return NULL; +} + /* * * spinlock lock ordering: if multiple locks are needed, obey this ordering: @@ -62,7 +88,7 @@ * */ -spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED; +DEFINE_SPINLOCK(dlm_domain_lock); LIST_HEAD(dlm_domains); static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); @@ -90,8 +116,7 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm, assert_spin_locked(&dlm->spinlock); q = &res->lockname; - q->hash = full_name_hash(q->name, q->len); - bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]); + bucket = dlm_lockres_hash(dlm, q->hash); /* get a reference for our hashtable */ dlm_lockres_get(res); @@ -100,34 +125,32 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm, } struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, - const char *name, - unsigned int len) + const char *name, + unsigned int len, + unsigned int hash) { - unsigned int hash; - struct hlist_node *iter; - struct dlm_lock_resource *tmpres=NULL; struct hlist_head *bucket; + struct hlist_node *list; mlog_entry("%.*s\n", len, name); assert_spin_locked(&dlm->spinlock); - hash = full_name_hash(name, len); - - bucket = &(dlm->lockres_hash[hash % DLM_HASH_BUCKETS]); - - /* check for pre-existing lock */ - hlist_for_each(iter, bucket) { - tmpres = hlist_entry(iter, struct dlm_lock_resource, hash_node); - if (tmpres->lockname.len == len && - memcmp(tmpres->lockname.name, name, len) == 0) { - dlm_lockres_get(tmpres); - break; - } + bucket = dlm_lockres_hash(dlm, hash); - tmpres = NULL; + hlist_for_each(list, bucket) { + struct dlm_lock_resource *res = hlist_entry(list, + struct dlm_lock_resource, hash_node); + if (res->lockname.name[0] != name[0]) + continue; + if (unlikely(res->lockname.len != len)) + continue; + if (memcmp(res->lockname.name + 1, name + 1, len - 1)) + continue; + dlm_lockres_get(res); + return res; } - return tmpres; + return NULL; } struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, @@ -135,9 +158,10 @@ struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, unsigned int len) { struct dlm_lock_resource *res; + unsigned int hash = dlm_lockid_hash(name, len); spin_lock(&dlm->spinlock); - res = __dlm_lookup_lockres(dlm, name, len); + res = __dlm_lookup_lockres(dlm, name, len, hash); spin_unlock(&dlm->spinlock); return res; } @@ -194,7 +218,7 @@ static int dlm_wait_on_domain_helper(const char *domain) static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) { if (dlm->lockres_hash) - 
free_page((unsigned long) dlm->lockres_hash); + dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); if (dlm->name) kfree(dlm->name); @@ -278,11 +302,21 @@ int dlm_domain_fully_joined(struct dlm_ctxt *dlm) return ret; } +static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm) +{ + if (dlm->dlm_worker) { + flush_workqueue(dlm->dlm_worker); + destroy_workqueue(dlm->dlm_worker); + dlm->dlm_worker = NULL; + } +} + static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) { dlm_unregister_domain_handlers(dlm); dlm_complete_thread(dlm); dlm_complete_recovery_thread(dlm); + dlm_destroy_dlm_worker(dlm); /* We've left the domain. Now we can take ourselves out of the * list and allow the kref stuff to help us free the @@ -304,8 +338,8 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm) restart: spin_lock(&dlm->spinlock); for (i = 0; i < DLM_HASH_BUCKETS; i++) { - while (!hlist_empty(&dlm->lockres_hash[i])) { - res = hlist_entry(dlm->lockres_hash[i].first, + while (!hlist_empty(dlm_lockres_hash(dlm, i))) { + res = hlist_entry(dlm_lockres_hash(dlm, i)->first, struct dlm_lock_resource, hash_node); /* need reference when manually grabbing lockres */ dlm_lockres_get(res); @@ -374,12 +408,13 @@ static void __dlm_print_nodes(struct dlm_ctxt *dlm) assert_spin_locked(&dlm->spinlock); - mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name); + printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name); while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { - mlog(ML_NOTICE, " node %d\n", node); + printk("%d ", node); } + printk("\n"); } static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) @@ -395,7 +430,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) node = exit_msg->node_idx; - mlog(0, "Node %u leaves domain %s\n", node, dlm->name); + printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name); spin_lock(&dlm->spinlock); clear_bit(node, dlm->domain_map); @@ -644,6 +679,8 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data) set_bit(assert->node_idx, dlm->domain_map); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); + printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n", + assert->node_idx, dlm->name); __dlm_print_nodes(dlm); /* notify anything attached to the heartbeat events */ @@ -1126,6 +1163,13 @@ static int dlm_join_domain(struct dlm_ctxt *dlm) goto bail; } + dlm->dlm_worker = create_singlethread_workqueue("dlm_wq"); + if (!dlm->dlm_worker) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + do { unsigned int backoff; status = dlm_try_to_join_domain(dlm); @@ -1166,6 +1210,7 @@ bail: dlm_unregister_domain_handlers(dlm); dlm_complete_thread(dlm); dlm_complete_recovery_thread(dlm); + dlm_destroy_dlm_worker(dlm); } return status; @@ -1191,7 +1236,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, goto leave; } - dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL); + dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES); if (!dlm->lockres_hash) { mlog_errno(-ENOMEM); kfree(dlm->name); @@ -1200,8 +1245,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, goto leave; } - for (i=0; i<DLM_HASH_BUCKETS; i++) - INIT_HLIST_HEAD(&dlm->lockres_hash[i]); + for (i = 0; i < DLM_HASH_BUCKETS; i++) + INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i)); strcpy(dlm->name, domain); dlm->key = key; @@ -1231,6 +1276,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char 
*domain, dlm->dlm_thread_task = NULL; dlm->dlm_reco_thread_task = NULL; + dlm->dlm_worker = NULL; init_waitqueue_head(&dlm->dlm_thread_wq); init_waitqueue_head(&dlm->dlm_reco_thread_wq); init_waitqueue_head(&dlm->reco.event); diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 7273d9fa6ba..0368c640218 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -116,7 +116,7 @@ static int dlmfs_file_open(struct inode *inode, * doesn't make sense for LVB writes. */ file->f_flags &= ~O_APPEND; - fp = kmalloc(sizeof(*fp), GFP_KERNEL); + fp = kmalloc(sizeof(*fp), GFP_NOFS); if (!fp) { status = -ENOMEM; goto bail; @@ -196,7 +196,7 @@ static ssize_t dlmfs_file_read(struct file *filp, else readlen = count - *ppos; - lvb_buf = kmalloc(readlen, GFP_KERNEL); + lvb_buf = kmalloc(readlen, GFP_NOFS); if (!lvb_buf) return -ENOMEM; @@ -240,7 +240,7 @@ static ssize_t dlmfs_file_write(struct file *filp, else writelen = count - *ppos; - lvb_buf = kmalloc(writelen, GFP_KERNEL); + lvb_buf = kmalloc(writelen, GFP_NOFS); if (!lvb_buf) return -ENOMEM; @@ -335,7 +335,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) inode->i_mode = mode; inode->i_uid = current->fsuid; inode->i_gid = current->fsgid; - inode->i_blksize = PAGE_CACHE_SIZE; inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -362,7 +361,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent, inode->i_mode = mode; inode->i_uid = current->fsuid; inode->i_gid = current->fsgid; - inode->i_blksize = PAGE_CACHE_SIZE; inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -629,9 +627,7 @@ static void __exit exit_dlmfs_fs(void) flush_workqueue(user_dlm_worker); destroy_workqueue(user_dlm_worker); - if (kmem_cache_destroy(dlmfs_inode_cache)) - printk(KERN_INFO "dlmfs_inode_cache: not all structures " - "were freed\n"); + kmem_cache_destroy(dlmfs_inode_cache); } MODULE_AUTHOR("Oracle"); diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 6fea28318d6..42a1b91979b 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -53,7 +53,7 @@ #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" -static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(dlm_cookie_lock); static u64 dlm_next_cookie = 1; static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, @@ -201,6 +201,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, struct dlm_lock *lock, int flags) { enum dlm_status status = DLM_DENIED; + int lockres_changed = 1; mlog_entry("type=%d\n", lock->ml.type); mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, @@ -226,8 +227,25 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->lock_pending = 0; if (status != DLM_NORMAL) { - if (status != DLM_NOTQUEUED) + if (status == DLM_RECOVERING && + dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + /* recovery lock was mastered by dead node. + * we need to have calc_usage shoot down this + * lockres and completely remaster it. */ + mlog(0, "%s: recovery lock was owned by " + "dead node %u, remaster it now.\n", + dlm->name, res->owner); + } else if (status != DLM_NOTQUEUED) { + /* + * DO NOT call calc_usage, as this would unhash + * the remote lockres before we ever get to use + * it. 
treat as if we never made any change to + * the lockres. + */ + lockres_changed = 0; dlm_error(status); + } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); } else if (dlm_is_recovery_lock(res->lockname.name, @@ -239,12 +257,12 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, mlog(0, "%s: $RECOVERY lock for this node (%u) is " "mastered by %u; got lock, manually granting (no ast)\n", dlm->name, dlm->node_num, res->owner); - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->granted); + list_move_tail(&lock->list, &res->granted); } spin_unlock(&res->spinlock); - dlm_lockres_calc_usage(dlm, res); + if (lockres_changed) + dlm_lockres_calc_usage(dlm, res); wake_up(&res->wq); return status; @@ -281,6 +299,14 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, if (tmpret >= 0) { // successfully sent and received ret = status; // this is already a dlm_status + if (ret == DLM_REJECTED) { + mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " + "no longer owned by %u. that node is coming back " + "up currently.\n", dlm->name, create.namelen, + create.name, res->owner); + dlm_print_one_lock_resource(res); + BUG(); + } } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { @@ -382,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, struct dlm_lock *lock; int kernel_allocated = 0; - lock = kcalloc(1, sizeof(*lock), GFP_KERNEL); + lock = kcalloc(1, sizeof(*lock), GFP_NOFS); if (!lock) return NULL; if (!lksb) { /* zero memory only if kernel-allocated */ - lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL); + lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS); if (!lksb) { kfree(lock); return NULL; @@ -429,11 +455,16 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data) if (!dlm_grab(dlm)) return DLM_REJECTED; - mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), - "Domain %s not fully joined!\n", dlm->name); - name = create->name; namelen = create->namelen; + status = DLM_REJECTED; + if (!dlm_domain_fully_joined(dlm)) { + mlog(ML_ERROR, "Domain %s not fully joined, but node %u is " + "sending a create_lock message for lock %.*s!\n", + dlm->name, create->node_idx, namelen, name); + dlm_error(status); + goto leave; + } status = DLM_IVBUFLEN; if (namelen > DLM_LOCKID_NAME_MAX) { @@ -509,8 +540,8 @@ static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie) enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, struct dlm_lockstatus *lksb, int flags, - const char *name, dlm_astlockfunc_t *ast, void *data, - dlm_bastlockfunc_t *bast) + const char *name, int namelen, dlm_astlockfunc_t *ast, + void *data, dlm_bastlockfunc_t *bast) { enum dlm_status status; struct dlm_lock_resource *res = NULL; @@ -540,7 +571,7 @@ enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, recovery = (flags & LKM_RECOVERY); if (recovery && - (!dlm_is_recovery_lock(name, strlen(name)) || convert) ) { + (!dlm_is_recovery_lock(name, namelen) || convert) ) { dlm_error(status); goto error; } @@ -612,7 +643,7 @@ retry_convert: } status = DLM_IVBUFLEN; - if (strlen(name) > DLM_LOCKID_NAME_MAX || strlen(name) < 1) { + if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) { dlm_error(status); goto error; } @@ -628,7 +659,7 @@ retry_convert: dlm_wait_for_recovery(dlm); /* find or create the lock resource */ - res = dlm_get_lock_resource(dlm, name, flags); + res = dlm_get_lock_resource(dlm, name, namelen, flags); if (!res) { status = DLM_IVLOCKID; dlm_error(status); @@ -669,18 +700,22 @@ retry_lock: msleep(100); /* no waiting for dlm_reco_thread */ 
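The rewritten branch below flattens the old nested retry logic, and the control flow is easier to see in isolation. What follows is a stand-alone sketch of the policy it implements (hypothetical names only: try_remote_lock(), wait_for_node_death() and wait_for_recovery() stand in for dlmlock_remote(), dlm_wait_for_node_death() and dlm_wait_for_recovery(); the 100ms backoff and the DLM_NODE_DEATH_WAIT_MAX timeout are omitted):

enum dlm_status { DLM_NORMAL, DLM_RECOVERING, DLM_MIGRATING, DLM_FORWARD };

/* hypothetical stand-ins for the real dlm calls named above */
enum dlm_status try_remote_lock(void);
void wait_for_node_death(unsigned int owner);
void wait_for_recovery(void);

static int must_retry(enum dlm_status s)
{
	return s == DLM_RECOVERING || s == DLM_MIGRATING || s == DLM_FORWARD;
}

static enum dlm_status lock_with_retry(int is_reco_lock, unsigned int owner)
{
	enum dlm_status status;

retry:
	status = try_remote_lock();
	if (must_retry(status)) {
		if (!is_reco_lock) {
			/* ordinary lock: block until recovery finishes,
			 * then try again */
			wait_for_recovery();
			goto retry;
		}
		if (status != DLM_RECOVERING)
			goto retry;	/* $RECOVERY lock, transient state */
		/* $RECOVERY lock whose master died: wait until the death
		 * is observed, then fall through so the caller can clean
		 * up the stale lockres and remaster it */
		wait_for_node_death(owner);
	}
	return status;
}

The notable behavioral change, per the added comment in the hunk, is that a $RECOVERY caller no longer retries after waiting out the dead master; it drops through so the lockres can be cleaned up and remastered.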
if (recovery) { - if (status == DLM_RECOVERING) { - mlog(0, "%s: got RECOVERING " - "for $REOCVERY lock, master " - "was %u\n", dlm->name, - res->owner); - dlm_wait_for_node_death(dlm, res->owner, - DLM_NODE_DEATH_WAIT_MAX); - } + if (status != DLM_RECOVERING) + goto retry_lock; + + mlog(0, "%s: got RECOVERING " + "for $RECOVERY lock, master " + "was %u\n", dlm->name, + res->owner); + /* wait to see the node go down, then + * drop down and allow the lockres to + * get cleaned up. need to remaster. */ + dlm_wait_for_node_death(dlm, res->owner, + DLM_NODE_DEATH_WAIT_MAX); } else { dlm_wait_for_recovery(dlm); + goto retry_lock; } - goto retry_lock; } if (status != DLM_NORMAL) { diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 940be4c13b1..f784177b624 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -47,7 +47,6 @@ #include "dlmapi.h" #include "dlmcommon.h" -#include "dlmdebug.h" #include "dlmdomain.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) @@ -74,6 +73,7 @@ struct dlm_master_list_entry wait_queue_head_t wq; atomic_t woken; struct kref mle_refs; + int inuse; unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; @@ -127,18 +127,30 @@ static inline int dlm_mle_equal(struct dlm_ctxt *dlm, return 1; } -#if 0 -/* Code here is included but defined out as it aids debugging */ +#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m) +static void _dlm_print_nodemap(unsigned long *map, const char *mapname) +{ + int i; + printk("%s=[ ", mapname); + for (i=0; i<O2NM_MAX_NODES; i++) + if (test_bit(i, map)) + printk("%d ", i); + printk("]"); +} -void dlm_print_one_mle(struct dlm_master_list_entry *mle) +static void dlm_print_one_mle(struct dlm_master_list_entry *mle) { - int i = 0, refs; + int refs; char *type; char attached; u8 master; unsigned int namelen; const char *name; struct kref *k; + unsigned long *maybe = mle->maybe_map, + *vote = mle->vote_map, + *resp = mle->response_map, + *node = mle->node_map; k = &mle->mle_refs; if (mle->type == DLM_MLE_BLOCK) @@ -159,18 +171,29 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle) name = mle->u.res->lockname.name; } - mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n", - i, type, refs, master, mle->new_master, attached, - namelen, namelen, name); + mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ", + namelen, name, type, refs, master, mle->new_master, attached, + mle->inuse); + dlm_print_nodemap(maybe); + printk(", "); + dlm_print_nodemap(vote); + printk(", "); + dlm_print_nodemap(resp); + printk(", "); + dlm_print_nodemap(node); + printk(", "); + printk("\n"); } +#if 0 +/* Code here is included but defined out as it aids debugging */ + static void dlm_dump_mles(struct dlm_ctxt *dlm) { struct dlm_master_list_entry *mle; struct list_head *iter; mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name); - mlog(ML_NOTICE, " ####: type refs owner new events? 
lockname nodemap votemap respmap maybemap\n"); spin_lock(&dlm->master_lock); list_for_each(iter, &dlm->master_list) { mle = list_entry(iter, struct dlm_master_list_entry, list); @@ -314,6 +337,31 @@ static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, spin_unlock(&dlm->spinlock); } +static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&dlm->master_lock); + mle->inuse++; + kref_get(&mle->mle_refs); +} + +static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + mle->inuse--; + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + +} + /* remove from list and free */ static void __dlm_put_mle(struct dlm_master_list_entry *mle) { @@ -322,9 +370,14 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle) assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); - BUG_ON(!atomic_read(&mle->mle_refs.refcount)); - - kref_put(&mle->mle_refs, dlm_mle_release); + if (!atomic_read(&mle->mle_refs.refcount)) { + /* this may or may not crash, but who cares. + * it's a BUG. */ + mlog(ML_ERROR, "bad mle: %p\n", mle); + dlm_print_one_mle(mle); + BUG(); + } else + kref_put(&mle->mle_refs, dlm_mle_release); } @@ -367,6 +420,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle, memset(mle->response_map, 0, sizeof(mle->response_map)); mle->master = O2NM_MAX_NODES; mle->new_master = O2NM_MAX_NODES; + mle->inuse = 0; if (mle->type == DLM_MLE_MASTER) { BUG_ON(!res); @@ -564,6 +618,28 @@ static void dlm_lockres_release(struct kref *kref) mlog(0, "destroying lockres %.*s\n", res->lockname.len, res->lockname.name); + if (!hlist_unhashed(&res->hash_node) || + !list_empty(&res->granted) || + !list_empty(&res->converting) || + !list_empty(&res->blocked) || + !list_empty(&res->dirty) || + !list_empty(&res->recovering) || + !list_empty(&res->purge)) { + mlog(ML_ERROR, + "Going to BUG for resource %.*s." + " We're on a list! [%c%c%c%c%c%c%c]\n", + res->lockname.len, res->lockname.name, + !hlist_unhashed(&res->hash_node) ? 'H' : ' ', + !list_empty(&res->granted) ? 'G' : ' ', + !list_empty(&res->converting) ? 'C' : ' ', + !list_empty(&res->blocked) ? 'B' : ' ', + !list_empty(&res->dirty) ? 'D' : ' ', + !list_empty(&res->recovering) ? 'R' : ' ', + !list_empty(&res->purge) ? 'P' : ' '); + + dlm_print_one_lock_resource(res); + } + /* By the time we're ready to blow this guy away, we shouldn't * be on any lists. 
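 * The check added above reports which list the resource is still
 * on (hashed, granted, converting, blocked, dirty, recovering or
 * purge) and dumps it before the BUG_ON()s below can fire.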
*/ BUG_ON(!hlist_unhashed(&res->hash_node)); @@ -579,11 +655,6 @@ static void dlm_lockres_release(struct kref *kref) kfree(res); } -void dlm_lockres_get(struct dlm_lock_resource *res) -{ - kref_get(&res->refs); -} - void dlm_lockres_put(struct dlm_lock_resource *res) { kref_put(&res->refs, dlm_lockres_release); @@ -603,7 +674,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm, memcpy(qname, name, namelen); res->lockname.len = namelen; - res->lockname.hash = full_name_hash(name, namelen); + res->lockname.hash = dlm_lockid_hash(name, namelen); init_waitqueue_head(&res->wq); spin_lock_init(&res->spinlock); @@ -637,11 +708,11 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, { struct dlm_lock_resource *res; - res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL); + res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS); if (!res) return NULL; - res->lockname.name = kmalloc(namelen, GFP_KERNEL); + res->lockname.name = kmalloc(namelen, GFP_NOFS); if (!res->lockname.name) { kfree(res); return NULL; @@ -669,6 +740,7 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, */ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, const char *lockid, + int namelen, int flags) { struct dlm_lock_resource *tmpres=NULL, *res=NULL; @@ -677,19 +749,19 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, int blocked = 0; int ret, nodenum; struct dlm_node_iter iter; - unsigned int namelen; + unsigned int hash; int tries = 0; int bit, wait_on_recovery = 0; BUG_ON(!lockid); - namelen = strlen(lockid); + hash = dlm_lockid_hash(lockid, namelen); mlog(0, "get lockres %s (len %d)\n", lockid, namelen); lookup: spin_lock(&dlm->spinlock); - tmpres = __dlm_lookup_lockres(dlm, lockid, namelen); + tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash); if (tmpres) { spin_unlock(&dlm->spinlock); mlog(0, "found in hash!\n"); @@ -704,7 +776,7 @@ lookup: mlog(0, "allocating a new resource\n"); /* nothing found and we need to allocate one. */ alloc_mle = (struct dlm_master_list_entry *) - kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); + kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!alloc_mle) goto leave; res = dlm_new_lockres(dlm, lockid, namelen); @@ -790,10 +862,11 @@ lookup: * if so, the creator of the BLOCK may try to put the last * ref at this time in the assert master handler, so we * need an extra one to keep from a bad ptr deref. 
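 * The extra reference is taken with dlm_get_mle_inuse(), which
 * also bumps mle->inuse, and is dropped again with
 * dlm_put_mle_inuse() once mastery has been resolved.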
*/ - dlm_get_mle(mle); + dlm_get_mle_inuse(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); +redo_request: while (wait_on_recovery) { /* any cluster changes that occurred after dropping the * dlm spinlock would be detectable be a change on the mle, @@ -812,7 +885,7 @@ lookup: } dlm_kick_recovery_thread(dlm); - msleep(100); + msleep(1000); dlm_wait_for_recovery(dlm); spin_lock(&dlm->spinlock); @@ -825,13 +898,15 @@ lookup: } else wait_on_recovery = 0; spin_unlock(&dlm->spinlock); + + if (wait_on_recovery) + dlm_wait_for_node_recovery(dlm, bit, 10000); } /* must wait for lock to be mastered elsewhere */ if (blocked) goto wait; -redo_request: ret = -EINVAL; dlm_node_iter_init(mle->vote_map, &iter); while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { @@ -856,6 +931,7 @@ wait: /* keep going until the response map includes all nodes */ ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); if (ret < 0) { + wait_on_recovery = 1; mlog(0, "%s:%.*s: node map changed, redo the " "master request now, blocked=%d\n", dlm->name, res->lockname.len, @@ -866,7 +942,7 @@ wait: dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); - /* dlm_print_one_mle(mle); */ + dlm_print_one_mle(mle); tries = 0; } goto redo_request; @@ -880,7 +956,7 @@ wait: dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); /* put the extra ref */ - dlm_put_mle(mle); + dlm_put_mle_inuse(mle); wake_waiters: spin_lock(&res->spinlock); @@ -921,12 +997,14 @@ recheck: spin_unlock(&res->spinlock); /* this will cause the master to re-assert across * the whole cluster, freeing up mles */ - ret = dlm_do_master_request(mle, res->owner); - if (ret < 0) { - /* give recovery a chance to run */ - mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); - msleep(500); - goto recheck; + if (res->owner != dlm->node_num) { + ret = dlm_do_master_request(mle, res->owner); + if (ret < 0) { + /* give recovery a chance to run */ + mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); + msleep(500); + goto recheck; + } } ret = 0; goto leave; @@ -962,6 +1040,12 @@ recheck: "rechecking now\n", dlm->name, res->lockname.len, res->lockname.name); goto recheck; + } else { + if (!voting_done) { + mlog(0, "map not changed and voting not done " + "for %s:%.*s\n", dlm->name, res->lockname.len, + res->lockname.name); + } } if (m != O2NM_MAX_NODES) { @@ -1129,18 +1213,6 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, set_bit(node, mle->vote_map); } else { mlog(ML_ERROR, "node down! %d\n", node); - - /* if the node wasn't involved in mastery skip it, - * but clear it out from the maps so that it will - * not affect mastery of this lockres */ - clear_bit(node, mle->response_map); - clear_bit(node, mle->vote_map); - if (!test_bit(node, mle->maybe_map)) - goto next; - - /* if we're already blocked on lock mastery, and the - * dead node wasn't the expected master, or there is - * another node in the maybe_map, keep waiting */ if (blocked) { int lowest = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); @@ -1148,54 +1220,53 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, /* act like it was never there */ clear_bit(node, mle->maybe_map); - if (node != lowest) - goto next; - - mlog(ML_ERROR, "expected master %u died while " - "this node was blocked waiting on it!\n", - node); - lowest = find_next_bit(mle->maybe_map, - O2NM_MAX_NODES, - lowest+1); - if (lowest < O2NM_MAX_NODES) { - mlog(0, "still blocked. 
waiting " - "on %u now\n", lowest); - goto next; + if (node == lowest) { + mlog(0, "expected master %u died" + " while this node was blocked " + "waiting on it!\n", node); + lowest = find_next_bit(mle->maybe_map, + O2NM_MAX_NODES, + lowest+1); + if (lowest < O2NM_MAX_NODES) { + mlog(0, "%s:%.*s:still " + "blocked. waiting on %u " + "now\n", dlm->name, + res->lockname.len, + res->lockname.name, + lowest); + } else { + /* mle is an MLE_BLOCK, but + * there is now nothing left to + * block on. we need to return + * all the way back out and try + * again with an MLE_MASTER. + * dlm_do_local_recovery_cleanup + * has already run, so the mle + * refcount is ok */ + mlog(0, "%s:%.*s: no " + "longer blocking. try to " + "master this here\n", + dlm->name, + res->lockname.len, + res->lockname.name); + mle->type = DLM_MLE_MASTER; + mle->u.res = res; + } } - - /* mle is an MLE_BLOCK, but there is now - * nothing left to block on. we need to return - * all the way back out and try again with - * an MLE_MASTER. dlm_do_local_recovery_cleanup - * has already run, so the mle refcount is ok */ - mlog(0, "no longer blocking. we can " - "try to master this here\n"); - mle->type = DLM_MLE_MASTER; - memset(mle->maybe_map, 0, - sizeof(mle->maybe_map)); - memset(mle->response_map, 0, - sizeof(mle->maybe_map)); - memcpy(mle->vote_map, mle->node_map, - sizeof(mle->node_map)); - mle->u.res = res; - set_bit(dlm->node_num, mle->maybe_map); - - ret = -EAGAIN; - goto next; } - clear_bit(node, mle->maybe_map); - if (node > dlm->node_num) - goto next; - - mlog(0, "dead node in map!\n"); - /* yuck. go back and re-contact all nodes - * in the vote_map, removing this node. */ - memset(mle->response_map, 0, - sizeof(mle->response_map)); + /* now blank out everything, as if we had never + * contacted anyone */ + memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); + memset(mle->response_map, 0, sizeof(mle->response_map)); + /* reset the vote_map to the current node_map */ + memcpy(mle->vote_map, mle->node_map, + sizeof(mle->node_map)); + /* put myself into the maybe map */ + if (mle->type != DLM_MLE_BLOCK) + set_bit(dlm->node_num, mle->maybe_map); } ret = -EAGAIN; -next: node = dlm_bitmap_diff_iter_next(&bdi, &sc); } return ret; @@ -1316,7 +1387,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; char *name; - unsigned int namelen; + unsigned int namelen, hash; int found, ret; int set_maybe; int dispatch_assert = 0; @@ -1331,6 +1402,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) name = request->name; namelen = request->namelen; + hash = dlm_lockid_hash(name, namelen); if (namelen > DLM_LOCKID_NAME_MAX) { response = DLM_IVBUFLEN; @@ -1339,7 +1411,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) way_up_top: spin_lock(&dlm->spinlock); - res = __dlm_lookup_lockres(dlm, name, namelen); + res = __dlm_lookup_lockres(dlm, name, namelen, hash); if (res) { spin_unlock(&dlm->spinlock); @@ -1459,21 +1531,18 @@ way_up_top: spin_unlock(&dlm->spinlock); mle = (struct dlm_master_list_entry *) - kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); + kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!mle) { response = DLM_MASTER_RESP_ERROR; mlog_errno(-ENOMEM); goto send_response; } - spin_lock(&dlm->spinlock); - dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, - name, namelen); - spin_unlock(&dlm->spinlock); goto way_up_top; } // mlog(0, 
"this is second time thru, already allocated, " // "add the block.\n"); + dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); set_bit(request->node_idx, mle->maybe_map); list_add(&mle->list, &dlm->master_list); response = DLM_MASTER_RESP_NO; @@ -1556,6 +1625,8 @@ again: dlm_node_iter_init(nodemap, &iter); while ((to = dlm_node_iter_next(&iter)) >= 0) { int r = 0; + struct dlm_master_list_entry *mle = NULL; + mlog(0, "sending assert master to %d (%.*s)\n", to, namelen, lockname); memset(&assert, 0, sizeof(assert)); @@ -1567,20 +1638,28 @@ again: tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, &assert, sizeof(assert), to, &r); if (tmpret < 0) { - mlog(ML_ERROR, "assert_master returned %d!\n", tmpret); + mlog(0, "assert_master returned %d!\n", tmpret); if (!dlm_is_host_down(tmpret)) { - mlog(ML_ERROR, "unhandled error!\n"); + mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); BUG(); } /* a node died. finish out the rest of the nodes. */ - mlog(ML_ERROR, "link to %d went down!\n", to); + mlog(0, "link to %d went down!\n", to); /* any nonzero status return will do */ ret = tmpret; } else if (r < 0) { /* ok, something horribly messed. kill thyself. */ mlog(ML_ERROR,"during assert master of %.*s to %u, " "got %d.\n", namelen, lockname, to, r); - dlm_dump_lock_resources(dlm); + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + if (dlm_find_mle(dlm, &mle, (char *)lockname, + namelen)) { + dlm_print_one_mle(mle); + __dlm_put_mle(mle); + } + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); BUG(); } else if (r == EAGAIN) { mlog(0, "%.*s: node %u create mles on other " @@ -1612,7 +1691,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; struct dlm_lock_resource *res = NULL; char *name; - unsigned int namelen; + unsigned int namelen, hash; u32 flags; int master_request = 0; int ret = 0; @@ -1622,6 +1701,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) name = assert->name; namelen = assert->namelen; + hash = dlm_lockid_hash(name, namelen); flags = be32_to_cpu(assert->flags); if (namelen > DLM_LOCKID_NAME_MAX) { @@ -1646,7 +1726,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) if (bit >= O2NM_MAX_NODES) { /* not necessarily an error, though less likely. * could be master just re-asserting. */ - mlog(ML_ERROR, "no bits set in the maybe_map, but %u " + mlog(0, "no bits set in the maybe_map, but %u " "is asserting! (%.*s)\n", assert->node_idx, namelen, name); } else if (bit != assert->node_idx) { @@ -1658,19 +1738,36 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) * number winning the mastery will respond * YES to mastery requests, but this node * had no way of knowing. let it pass. */ - mlog(ML_ERROR, "%u is the lowest node, " + mlog(0, "%u is the lowest node, " "%u is asserting. 
(%.*s) %u must " "have begun after %u won.\n", bit, assert->node_idx, namelen, name, bit, assert->node_idx); } } + if (mle->type == DLM_MLE_MIGRATION) { + if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { + mlog(0, "%s:%.*s: got cleanup assert" + " from %u for migration\n", + dlm->name, namelen, name, + assert->node_idx); + } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { + mlog(0, "%s:%.*s: got unrelated assert" + " from %u for migration, ignoring\n", + dlm->name, namelen, name, + assert->node_idx); + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + goto done; + } + } } spin_unlock(&dlm->master_lock); /* ok everything checks out with the MLE * now check to see if there is a lockres */ - res = __dlm_lookup_lockres(dlm, name, namelen); + res = __dlm_lookup_lockres(dlm, name, namelen, hash); if (res) { spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { @@ -1679,7 +1776,8 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) goto kill; } if (!mle) { - if (res->owner != assert->node_idx) { + if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && + res->owner != assert->node_idx) { mlog(ML_ERROR, "assert_master from " "%u, but current owner is " "%u! (%.*s)\n", @@ -1732,6 +1830,7 @@ ok: if (mle) { int extra_ref = 0; int nn = -1; + int rr, err = 0; spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) @@ -1751,27 +1850,64 @@ ok: wake_up(&mle->wq); spin_unlock(&mle->spinlock); - if (mle->type == DLM_MLE_MIGRATION && res) { - mlog(0, "finishing off migration of lockres %.*s, " - "from %u to %u\n", - res->lockname.len, res->lockname.name, - dlm->node_num, mle->new_master); + if (res) { spin_lock(&res->spinlock); - res->state &= ~DLM_LOCK_RES_MIGRATING; - dlm_change_lockres_owner(dlm, res, mle->new_master); - BUG_ON(res->state & DLM_LOCK_RES_DIRTY); + if (mle->type == DLM_MLE_MIGRATION) { + mlog(0, "finishing off migration of lockres %.*s, " + "from %u to %u\n", + res->lockname.len, res->lockname.name, + dlm->node_num, mle->new_master); + res->state &= ~DLM_LOCK_RES_MIGRATING; + dlm_change_lockres_owner(dlm, res, mle->new_master); + BUG_ON(res->state & DLM_LOCK_RES_DIRTY); + } else { + dlm_change_lockres_owner(dlm, res, mle->master); + } spin_unlock(&res->spinlock); } - /* master is known, detach if not already detached */ - dlm_mle_detach_hb_events(dlm, mle); - dlm_put_mle(mle); - + + /* master is known, detach if not already detached. + * ensures that only one assert_master call will happen + * on this mle. */ + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + + rr = atomic_read(&mle->mle_refs.refcount); + if (mle->inuse > 0) { + if (extra_ref && rr < 3) + err = 1; + else if (!extra_ref && rr < 2) + err = 1; + } else { + if (extra_ref && rr < 2) + err = 1; + else if (!extra_ref && rr < 1) + err = 1; + } + if (err) { + mlog(ML_ERROR, "%s:%.*s: got assert master from %u " + "that will mess up this node, refs=%d, extra=%d, " + "inuse=%d\n", dlm->name, namelen, name, + assert->node_idx, rr, extra_ref, mle->inuse); + dlm_print_one_mle(mle); + } + list_del_init(&mle->list); + __dlm_mle_detach_hb_events(dlm, mle); + __dlm_put_mle(mle); if (extra_ref) { /* the assert master message now balances the extra * ref given by the master / migration request message. * if this is the last put, it will be removed * from the list. 
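 * Both dlm->spinlock and dlm->master_lock are held at this point,
 * so the put uses the caller-locked variant __dlm_put_mle().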
*/ - dlm_put_mle(mle); + __dlm_put_mle(mle); + } + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + } else if (res) { + if (res->owner != assert->node_idx) { + mlog(0, "assert_master from %u, but current " + "owner is %u (%.*s), no mle\n", assert->node_idx, + res->owner, namelen, name); } } @@ -1788,12 +1924,12 @@ done: kill: /* kill the caller! */ + mlog(ML_ERROR, "Bad message received from another node. Dumping state " + "and killing the other node now! This node is OK and can continue.\n"); + __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); dlm_lockres_put(res); - mlog(ML_ERROR, "Bad message received from another node. Dumping state " - "and killing the other node now! This node is OK and can continue.\n"); - dlm_dump_lock_resources(dlm); dlm_put(dlm); return -EINVAL; } @@ -1803,7 +1939,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, int ignore_higher, u8 request_from, u32 flags) { struct dlm_work_item *item; - item = kcalloc(1, sizeof(*item), GFP_KERNEL); + item = kcalloc(1, sizeof(*item), GFP_NOFS); if (!item) return -ENOMEM; @@ -1825,7 +1961,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); - schedule_work(&dlm->dispatched_work); + queue_work(dlm->dlm_worker, &dlm->dispatched_work); return 0; } @@ -1866,6 +2002,23 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) } } + /* + * If we're migrating this lock to someone else, we are no + * longer allowed to assert out own mastery. OTOH, we need to + * prevent migration from starting while we're still asserting + * our dominance. The reserved ast delays migration. + */ + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_MIGRATING) { + mlog(0, "Someone asked us to assert mastery, but we're " + "in the middle of migration. Skipping assert, " + "the new master will handle that.\n"); + spin_unlock(&res->spinlock); + goto put; + } else + __dlm_lockres_reserve_ast(res); + spin_unlock(&res->spinlock); + /* this call now finishes out the nodemap * even if one or more nodes die */ mlog(0, "worker about to master %.*s here, this=%u\n", @@ -1875,9 +2028,14 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) nodemap, flags); if (ret < 0) { /* no need to restart, we are done */ - mlog_errno(ret); + if (!dlm_is_host_down(ret)) + mlog_errno(ret); } + /* Ok, we've asserted ourselves. Let's let migration start. */ + dlm_lockres_release_ast(dlm, res); + +put: dlm_lockres_put(res); mlog(0, "finished with dlm_assert_master_worker\n"); @@ -1916,6 +2074,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, BUG(); /* host is down, so answer for that node would be * DLM_LOCK_RES_OWNER_UNKNOWN. continue. 
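 * ret is reset to 0 below so a dead node is not reported to the
 * caller as an error.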
*/ + ret = 0; } if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { @@ -2016,14 +2175,14 @@ int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, */ ret = -ENOMEM; - mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL); + mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); if (!mres) { mlog_errno(ret); goto leave; } mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, - GFP_KERNEL); + GFP_NOFS); if (!mle) { mlog_errno(ret); goto leave; @@ -2117,7 +2276,7 @@ fail: * take both dlm->spinlock and dlm->master_lock */ spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); - dlm_get_mle(mle); + dlm_get_mle_inuse(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); @@ -2134,7 +2293,10 @@ fail: /* migration failed, detach and clean up mle */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); - dlm_put_mle(mle); + dlm_put_mle_inuse(mle); + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_MIGRATING; + spin_unlock(&res->spinlock); goto leave; } @@ -2164,8 +2326,8 @@ fail: /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { - mlog(0, "%s:%.*s: expected migration target %u " - "is no longer up. restarting.\n", + mlog(0, "%s:%.*s: expected migration " + "target %u is no longer up, restarting\n", dlm->name, res->lockname.len, res->lockname.name, target); ret = -ERESTARTSYS; @@ -2175,7 +2337,10 @@ fail: /* migration failed, detach and clean up mle */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); - dlm_put_mle(mle); + dlm_put_mle_inuse(mle); + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_MIGRATING; + spin_unlock(&res->spinlock); goto leave; } /* TODO: if node died: stop, clean up, return error */ @@ -2191,7 +2356,7 @@ fail: /* master is known, detach if not already detached */ dlm_mle_detach_hb_events(dlm, mle); - dlm_put_mle(mle); + dlm_put_mle_inuse(mle); ret = 0; dlm_lockres_calc_usage(dlm, res); @@ -2210,7 +2375,6 @@ leave: mlog(0, "returning %d\n", ret); return ret; } -EXPORT_SYMBOL_GPL(dlm_migrate_lockres); int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) { @@ -2462,7 +2626,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; const char *name; - unsigned int namelen; + unsigned int namelen, hash; int ret = 0; if (!dlm_grab(dlm)) @@ -2470,10 +2634,11 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) name = migrate->name; namelen = migrate->namelen; + hash = dlm_lockid_hash(name, namelen); /* preallocate.. 
if this fails, abort */ mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, - GFP_KERNEL); + GFP_NOFS); if (!mle) { ret = -ENOMEM; @@ -2482,7 +2647,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) /* check for pre-existing lock */ spin_lock(&dlm->spinlock); - res = __dlm_lookup_lockres(dlm, name, namelen); + res = __dlm_lookup_lockres(dlm, name, namelen, hash); spin_lock(&dlm->master_lock); if (res) { @@ -2580,6 +2745,7 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm, /* remove it from the list so that only one * mle will be found */ list_del_init(&tmp->list); + __dlm_mle_detach_hb_events(dlm, mle); } spin_unlock(&tmp->spinlock); } @@ -2601,6 +2767,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) struct list_head *iter, *iter2; struct dlm_master_list_entry *mle; struct dlm_lock_resource *res; + unsigned int hash; mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); top: @@ -2640,7 +2807,7 @@ top: * may result in the mle being unlinked and * freed, but there may still be a process * waiting in the dlmlock path which is fine. */ - mlog(ML_ERROR, "node %u was expected master\n", + mlog(0, "node %u was expected master\n", dead_node); atomic_set(&mle->woken, 1); spin_unlock(&mle->spinlock); @@ -2673,19 +2840,21 @@ top: /* remove from the list early. NOTE: unlinking * list_head while in list_for_each_safe */ + __dlm_mle_detach_hb_events(dlm, mle); spin_lock(&mle->spinlock); list_del_init(&mle->list); atomic_set(&mle->woken, 1); spin_unlock(&mle->spinlock); wake_up(&mle->wq); - mlog(0, "node %u died during migration from " - "%u to %u!\n", dead_node, + mlog(0, "%s: node %u died during migration from " + "%u to %u!\n", dlm->name, dead_node, mle->master, mle->new_master); /* if there is a lockres associated with this * mle, find it and set its owner to UNKNOWN */ + hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len); res = __dlm_lookup_lockres(dlm, mle->u.name.name, - mle->u.name.len); + mle->u.name.len, hash); if (res) { /* unfortunately if we hit this rare case, our * lock ordering is messed. 
we need to drop diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 805cbabac05..9d950d7cea3 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -95,11 +95,14 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data); static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); +static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 *real_master); static u64 dlm_get_next_mig_cookie(void); -static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED; -static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(dlm_reco_state_lock); +static DEFINE_SPINLOCK(dlm_mig_cookie_lock); static u64 dlm_mig_cookie = 1; static u64 dlm_get_next_mig_cookie(void) @@ -115,12 +118,37 @@ static u64 dlm_get_next_mig_cookie(void) return c; } +static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, + u8 dead_node) +{ + assert_spin_locked(&dlm->spinlock); + if (dlm->reco.dead_node != dead_node) + mlog(0, "%s: changing dead_node from %u to %u\n", + dlm->name, dlm->reco.dead_node, dead_node); + dlm->reco.dead_node = dead_node; +} + +static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, + u8 master) +{ + assert_spin_locked(&dlm->spinlock); + mlog(0, "%s: changing new_master from %u to %u\n", + dlm->name, dlm->reco.new_master, master); + dlm->reco.new_master = master; +} + +static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) +{ + assert_spin_locked(&dlm->spinlock); + clear_bit(dlm->reco.dead_node, dlm->recovery_map); + dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); + dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); +} + static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) { spin_lock(&dlm->spinlock); - clear_bit(dlm->reco.dead_node, dlm->recovery_map); - dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; - dlm->reco.new_master = O2NM_INVALID_NODE_NUM; + __dlm_reset_recovery(dlm); spin_unlock(&dlm->spinlock); } @@ -132,12 +160,21 @@ void dlm_dispatch_work(void *data) struct list_head *iter, *iter2; struct dlm_work_item *item; dlm_workfunc_t *workfunc; + int tot=0; + + if (!dlm_joined(dlm)) + return; spin_lock(&dlm->work_lock); list_splice_init(&dlm->work_list, &tmp_list); spin_unlock(&dlm->work_lock); list_for_each_safe(iter, iter2, &tmp_list) { + tot++; + } + mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); + + list_for_each_safe(iter, iter2, &tmp_list) { item = list_entry(iter, struct dlm_work_item, list); workfunc = item->func; list_del_init(&item->list); @@ -220,6 +257,52 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) * */ +static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) +{ + struct dlm_reco_node_data *ndata; + struct dlm_lock_resource *res; + + mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", + dlm->name, dlm->dlm_reco_thread_task->pid, + dlm->reco.state & DLM_RECO_STATE_ACTIVE ? 
"ACTIVE" : "inactive", + dlm->reco.dead_node, dlm->reco.new_master); + + list_for_each_entry(ndata, &dlm->reco.node_data, list) { + char *st = "unknown"; + switch (ndata->state) { + case DLM_RECO_NODE_DATA_INIT: + st = "init"; + break; + case DLM_RECO_NODE_DATA_REQUESTING: + st = "requesting"; + break; + case DLM_RECO_NODE_DATA_DEAD: + st = "dead"; + break; + case DLM_RECO_NODE_DATA_RECEIVING: + st = "receiving"; + break; + case DLM_RECO_NODE_DATA_REQUESTED: + st = "requested"; + break; + case DLM_RECO_NODE_DATA_DONE: + st = "done"; + break; + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + st = "finalize-sent"; + break; + default: + st = "bad"; + break; + } + mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", + dlm->name, ndata->node_num, st); + } + list_for_each_entry(res, &dlm->reco.resources, recovering) { + mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", + dlm->name, res->lockname.len, res->lockname.name); + } +} #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) @@ -267,11 +350,23 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) { int dead; spin_lock(&dlm->spinlock); - dead = test_bit(node, dlm->domain_map); + dead = !test_bit(node, dlm->domain_map); spin_unlock(&dlm->spinlock); return dead; } +/* returns true if node is no longer in the domain + * could be dead or just not joined */ +static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) +{ + int recovered; + spin_lock(&dlm->spinlock); + recovered = !test_bit(node, dlm->recovery_map); + spin_unlock(&dlm->spinlock); + return recovered; +} + + int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) { if (timeout) { @@ -290,6 +385,24 @@ int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) return 0; } +int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) +{ + if (timeout) { + mlog(0, "%s: waiting %dms for notification of " + "recovery of node %u\n", dlm->name, timeout, node); + wait_event_timeout(dlm->dlm_reco_thread_wq, + dlm_is_node_recovered(dlm, node), + msecs_to_jiffies(timeout)); + } else { + mlog(0, "%s: waiting indefinitely for notification " + "of recovery of node %u\n", dlm->name, node); + wait_event(dlm->dlm_reco_thread_wq, + dlm_is_node_recovered(dlm, node)); + } + /* for now, return 0 */ + return 0; +} + /* callers of the top-level api calls (dlmlock/dlmunlock) should * block on the dlm->reco.event when recovery is in progress. 
* the dlm recovery thread will set this state when it begins @@ -308,6 +421,13 @@ static int dlm_in_recovery(struct dlm_ctxt *dlm) void dlm_wait_for_recovery(struct dlm_ctxt *dlm) { + if (dlm_in_recovery(dlm)) { + mlog(0, "%s: reco thread %d in recovery: " + "state=%d, master=%u, dead=%u\n", + dlm->name, dlm->dlm_reco_thread_task->pid, + dlm->reco.state, dlm->reco.new_master, + dlm->reco.dead_node); + } wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); } @@ -341,7 +461,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) mlog(0, "new master %u died while recovering %u!\n", dlm->reco.new_master, dlm->reco.dead_node); /* unset the new_master, leave dead_node */ - dlm->reco.new_master = O2NM_INVALID_NODE_NUM; + dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); } /* select a target to recover */ @@ -350,14 +470,14 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); if (bit >= O2NM_MAX_NODES || bit < 0) - dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; + dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); else - dlm->reco.dead_node = bit; + dlm_set_reco_dead_node(dlm, bit); } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { /* BUG? */ mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", dlm->reco.dead_node); - dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; + dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); } if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { @@ -366,7 +486,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) /* return to main thread loop and sleep. */ return 0; } - mlog(0, "recovery thread found node %u in the recovery map!\n", + mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", + dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.dead_node); spin_unlock(&dlm->spinlock); @@ -389,8 +510,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) } mlog(0, "another node will master this recovery session.\n"); } - mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n", - dlm->name, dlm->reco.new_master, + mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", + dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master, dlm->node_num, dlm->reco.dead_node); /* it is safe to start everything back up here @@ -402,11 +523,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) return 0; master_here: - mlog(0, "mastering recovery of %s:%u here(this=%u)!\n", + mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n", + dlm->dlm_reco_thread_task->pid, dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_remaster_locks(dlm, dlm->reco.dead_node); if (status < 0) { + /* we should never hit this anymore */ mlog(ML_ERROR, "error %d remastering locks for node %u, " "retrying.\n", status, dlm->reco.dead_node); /* yield a bit to allow any final network messages @@ -433,9 +556,16 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) int destroy = 0; int pass = 0; - status = dlm_init_recovery_area(dlm, dead_node); - if (status < 0) - goto leave; + do { + /* we have become recovery master. there is no escaping + * this, so just keep trying until we get it. 
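+ * Each failed dlm_init_recovery_area() attempt logs the failure
+ * and sleeps for a second before the loop retries.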
*/ + status = dlm_init_recovery_area(dlm, dead_node); + if (status < 0) { + mlog(ML_ERROR, "%s: failed to alloc recovery area, " + "retrying\n", dlm->name); + msleep(1000); + } + } while (status != 0); /* safe to access the node data list without a lock, since this * process is the only one to change the list */ @@ -452,16 +582,36 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) continue; } - status = dlm_request_all_locks(dlm, ndata->node_num, dead_node); - if (status < 0) { - mlog_errno(status); - if (dlm_is_host_down(status)) - ndata->state = DLM_RECO_NODE_DATA_DEAD; - else { - destroy = 1; - goto leave; + do { + status = dlm_request_all_locks(dlm, ndata->node_num, + dead_node); + if (status < 0) { + mlog_errno(status); + if (dlm_is_host_down(status)) { + /* node died, ignore it for recovery */ + status = 0; + ndata->state = DLM_RECO_NODE_DATA_DEAD; + /* wait for the domain map to catch up + * with the network state. */ + wait_event_timeout(dlm->dlm_reco_thread_wq, + dlm_is_node_dead(dlm, + ndata->node_num), + msecs_to_jiffies(1000)); + mlog(0, "waited 1 sec for %u, " + "dead? %s\n", ndata->node_num, + dlm_is_node_dead(dlm, ndata->node_num) ? + "yes" : "no"); + } else { + /* -ENOMEM on the other node */ + mlog(0, "%s: node %u returned " + "%d during recovery, retrying " + "after a short wait\n", + dlm->name, ndata->node_num, + status); + msleep(100); + } } - } + } while (status != 0); switch (ndata->state) { case DLM_RECO_NODE_DATA_INIT: @@ -473,10 +623,9 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) mlog(0, "node %u died after requesting " "recovery info for node %u\n", ndata->node_num, dead_node); - // start all over - destroy = 1; - status = -EAGAIN; - goto leave; + /* fine. don't need this node's info. + * continue without it. */ + break; case DLM_RECO_NODE_DATA_REQUESTING: ndata->state = DLM_RECO_NODE_DATA_REQUESTED; mlog(0, "now receiving recovery data from " @@ -520,35 +669,26 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) BUG(); break; case DLM_RECO_NODE_DATA_DEAD: - mlog(ML_NOTICE, "node %u died after " + mlog(0, "node %u died after " "requesting recovery info for " "node %u\n", ndata->node_num, dead_node); - spin_unlock(&dlm_reco_state_lock); - // start all over - destroy = 1; - status = -EAGAIN; - /* instead of spinning like crazy here, - * wait for the domain map to catch up - * with the network state. otherwise this - * can be hit hundreds of times before - * the node is really seen as dead. */ - wait_event_timeout(dlm->dlm_reco_thread_wq, - dlm_is_node_dead(dlm, - ndata->node_num), - msecs_to_jiffies(1000)); - mlog(0, "waited 1 sec for %u, " - "dead? %s\n", ndata->node_num, - dlm_is_node_dead(dlm, ndata->node_num) ? - "yes" : "no"); - goto leave; + break; case DLM_RECO_NODE_DATA_RECEIVING: case DLM_RECO_NODE_DATA_REQUESTED: + mlog(0, "%s: node %u still in state %s\n", + dlm->name, ndata->node_num, + ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 
+ "receiving" : "requested"); all_nodes_done = 0; break; case DLM_RECO_NODE_DATA_DONE: + mlog(0, "%s: node %u state is done\n", + dlm->name, ndata->node_num); break; case DLM_RECO_NODE_DATA_FINALIZE_SENT: + mlog(0, "%s: node %u state is finalize\n", + dlm->name, ndata->node_num); break; } } @@ -578,7 +718,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) jiffies, dlm->reco.dead_node, dlm->node_num, dlm->reco.new_master); destroy = 1; - status = ret; + status = 0; /* rescan everything marked dirty along the way */ dlm_kick_thread(dlm, NULL); break; @@ -591,7 +731,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) } -leave: if (destroy) dlm_destroy_recovery_area(dlm, dead_node); @@ -617,7 +756,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) } BUG_ON(num == dead_node); - ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL); + ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS); if (!ndata) { dlm_destroy_recovery_area(dlm, dead_node); return -ENOMEM; @@ -691,16 +830,25 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) if (!dlm_grab(dlm)) return -EINVAL; + if (lr->dead_node != dlm->reco.dead_node) { + mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " + "dead_node is %u\n", dlm->name, lr->node_idx, + lr->dead_node, dlm->reco.dead_node); + dlm_print_reco_node_status(dlm); + /* this is a hack */ + dlm_put(dlm); + return -ENOMEM; + } BUG_ON(lr->dead_node != dlm->reco.dead_node); - item = kcalloc(1, sizeof(*item), GFP_KERNEL); + item = kcalloc(1, sizeof(*item), GFP_NOFS); if (!item) { dlm_put(dlm); return -ENOMEM; } /* this will get freed by dlm_request_all_locks_worker */ - buf = (char *) __get_free_page(GFP_KERNEL); + buf = (char *) __get_free_page(GFP_NOFS); if (!buf) { kfree(item); dlm_put(dlm); @@ -715,7 +863,7 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); - schedule_work(&dlm->dispatched_work); + queue_work(dlm->dlm_worker, &dlm->dispatched_work); dlm_put(dlm); return 0; @@ -730,32 +878,34 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) struct list_head *iter; int ret; u8 dead_node, reco_master; + int skip_all_done = 0; dlm = item->dlm; dead_node = item->u.ral.dead_node; reco_master = item->u.ral.reco_master; mres = (struct dlm_migratable_lockres *)data; + mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", + dlm->name, dead_node, reco_master); + if (dead_node != dlm->reco.dead_node || reco_master != dlm->reco.new_master) { - /* show extra debug info if the recovery state is messed */ - mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), " - "request(dead=%u, master=%u)\n", - dlm->name, dlm->reco.dead_node, dlm->reco.new_master, - dead_node, reco_master); - mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u " - "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n", - dlm->name, mres->lockname_len, mres->lockname, mres->master, - mres->num_locks, mres->total_locks, mres->flags, - dlm_get_lock_cookie_node(mres->ml[0].cookie), - dlm_get_lock_cookie_seq(mres->ml[0].cookie), - mres->ml[0].list, mres->ml[0].flags, - mres->ml[0].type, mres->ml[0].convert_type, - mres->ml[0].highest_blocked, mres->ml[0].node); - BUG(); + /* worker could have been created before the recovery master + * died. if so, do not continue, but do not error. 
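+ * A new_master of O2NM_INVALID_NODE_NUM below means the recovery
+ * master died; the state is quietly dropped rather than BUG()ing.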
*/ + if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { + mlog(ML_NOTICE, "%s: will not send recovery state, " + "recovery master %u died, thread=(dead=%u,mas=%u)" + " current=(dead=%u,mas=%u)\n", dlm->name, + reco_master, dead_node, reco_master, + dlm->reco.dead_node, dlm->reco.new_master); + } else { + mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " + "master=%u), request(dead=%u, master=%u)\n", + dlm->name, dlm->reco.dead_node, + dlm->reco.new_master, dead_node, reco_master); + } + goto leave; } - BUG_ON(dead_node != dlm->reco.dead_node); - BUG_ON(reco_master != dlm->reco.new_master); /* lock resources should have already been moved to the * dlm->reco.resources list. now move items from that list @@ -766,12 +916,20 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) dlm_move_reco_locks_to_list(dlm, &resources, dead_node); /* now we can begin blasting lockreses without the dlm lock */ + + /* any errors returned will be due to the new_master dying, + * the dlm_reco_thread should detect this */ list_for_each(iter, &resources) { res = list_entry (iter, struct dlm_lock_resource, recovering); ret = dlm_send_one_lockres(dlm, res, mres, reco_master, DLM_MRES_RECOVERY); - if (ret < 0) - mlog_errno(ret); + if (ret < 0) { + mlog(ML_ERROR, "%s: node %u went down while sending " + "recovery state for dead node %u, ret=%d\n", dlm->name, + reco_master, dead_node, ret); + skip_all_done = 1; + break; + } } /* move the resources back to the list */ @@ -779,10 +937,15 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) list_splice_init(&resources, &dlm->reco.resources); spin_unlock(&dlm->spinlock); - ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); - if (ret < 0) - mlog_errno(ret); - + if (!skip_all_done) { + ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); + if (ret < 0) { + mlog(ML_ERROR, "%s: node %u went down while sending " + "recovery all-done for dead node %u, ret=%d\n", + dlm->name, reco_master, dead_node, ret); + } + } +leave: free_page((unsigned long)data); } @@ -801,8 +964,14 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, sizeof(done_msg), send_to, &tmpret); - /* negative status is ignored by the caller */ - if (ret >= 0) + if (ret < 0) { + if (!dlm_is_host_down(ret)) { + mlog_errno(ret); + mlog(ML_ERROR, "%s: unknown error sending data-done " + "to %u\n", dlm->name, send_to); + BUG(); + } + } else ret = tmpret; return ret; } @@ -822,7 +991,11 @@ int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data) mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " "node_idx=%u, this node=%u\n", done->dead_node, dlm->reco.dead_node, done->node_idx, dlm->node_num); - BUG_ON(done->dead_node != dlm->reco.dead_node); + + mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), + "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " + "node_idx=%u, this node=%u\n", done->dead_node, + dlm->reco.dead_node, done->node_idx, dlm->node_num); spin_lock(&dlm_reco_state_lock); list_for_each(iter, &dlm->reco.node_data) { @@ -905,13 +1078,11 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, mlog(0, "found lockres owned by dead node while " "doing recovery for node %u. 
sending it.\n", dead_node); - list_del_init(&res->recovering); - list_add_tail(&res->recovering, list); + list_move_tail(&res->recovering, list); } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { mlog(0, "found UNKNOWN owner while doing recovery " "for node %u. sending it.\n", dead_node); - list_del_init(&res->recovering); - list_add_tail(&res->recovering, list); + list_move_tail(&res->recovering, list); } } spin_unlock(&dlm->spinlock); @@ -1023,8 +1194,9 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->type == LKM_PRMODE) { /* if it is already set, this had better be a PR * and it has to match */ - if (mres->lvb[0] && (ml->type == LKM_EXMODE || - memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { + if (!dlm_lvb_is_empty(mres->lvb) && + (ml->type == LKM_EXMODE || + memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { mlog(ML_ERROR, "mismatched lvbs!\n"); __dlm_print_one_lock_resource(lock->lockres); BUG(); @@ -1083,22 +1255,25 @@ int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, * we must send it immediately. */ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); - if (ret < 0) { - // TODO - mlog(ML_ERROR, "dlm_send_mig_lockres_msg " - "returned %d, TODO\n", ret); - BUG(); - } + if (ret < 0) + goto error; } } /* flush any remaining locks */ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); - if (ret < 0) { - // TODO - mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, " - "TODO\n", ret); + if (ret < 0) + goto error; + return ret; + +error: + mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", + dlm->name, ret); + if (!dlm_is_host_down(ret)) BUG(); - } + mlog(0, "%s: node %u went down while sending %s " + "lockres %.*s\n", dlm->name, send_to, + flags & DLM_MRES_RECOVERY ? "recovery" : "migration", + res->lockname.len, res->lockname.name); return ret; } @@ -1146,8 +1321,8 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) mlog(0, "all done flag. 
all lockres data received!\n"); ret = -ENOMEM; - buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL); - item = kcalloc(1, sizeof(*item), GFP_KERNEL); + buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); + item = kcalloc(1, sizeof(*item), GFP_NOFS); if (!buf || !item) goto leave; @@ -1238,7 +1413,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); - schedule_work(&dlm->dispatched_work); + queue_work(dlm->dlm_worker, &dlm->dispatched_work); leave: dlm_put(dlm); @@ -1312,8 +1487,9 @@ leave: -int dlm_lockres_master_requery(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, u8 *real_master) +static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 *real_master) { struct dlm_node_iter iter; int nodenum; @@ -1406,6 +1582,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) struct dlm_ctxt *dlm = data; struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; struct dlm_lock_resource *res = NULL; + unsigned int hash; int master = DLM_LOCK_RES_OWNER_UNKNOWN; u32 flags = DLM_ASSERT_MASTER_REQUERY; @@ -1415,8 +1592,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) return master; } + hash = dlm_lockid_hash(req->name, req->namelen); + spin_lock(&dlm->spinlock); - res = __dlm_lookup_lockres(dlm, req->name, req->namelen); + res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); if (res) { spin_lock(&res->spinlock); master = res->owner; @@ -1483,7 +1662,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *newlock = NULL; struct dlm_lockstatus *lksb = NULL; int ret = 0; - int i; + int i, bad; struct list_head *iter; struct dlm_lock *lock = NULL; @@ -1529,8 +1708,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* move the lock to its proper place */ /* do not alter lock refcount. switching lists. */ - list_del_init(&lock->list); - list_add_tail(&lock->list, queue); + list_move_tail(&lock->list, queue); spin_unlock(&res->spinlock); mlog(0, "just reordered a local lock!\n"); @@ -1553,28 +1731,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, } lksb->flags |= (ml->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); - - if (mres->lvb[0]) { + + if (ml->type == LKM_NLMODE) + goto skip_lvb; + + if (!dlm_lvb_is_empty(mres->lvb)) { if (lksb->flags & DLM_LKSB_PUT_LVB) { /* other node was trying to update * lvb when node died. recreate the * lksb with the updated lvb. */ memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); + /* the lock resource lvb update must happen + * NOW, before the spinlock is dropped. + * we no longer wait for the AST to update + * the lvb. */ + memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); - if (res->lvb[0] && (ml->type == LKM_EXMODE || - memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { - mlog(ML_ERROR, "received bad lvb!\n"); - __dlm_print_one_lock_resource(res); - BUG(); + if (!dlm_lvb_is_empty(res->lvb) && + (ml->type == LKM_EXMODE || + memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { + int i; + mlog(ML_ERROR, "%s:%.*s: received bad " + "lvb! 
type=%d\n", dlm->name, + res->lockname.len, + res->lockname.name, ml->type); + printk("lockres lvb=["); + for (i=0; i<DLM_LVB_LEN; i++) + printk("%02x", res->lvb[i]); + printk("]\nmigrated lvb=["); + for (i=0; i<DLM_LVB_LEN; i++) + printk("%02x", mres->lvb[i]); + printk("]\n"); + dlm_print_one_lock_resource(res); + BUG(); } memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } } - +skip_lvb: /* NOTE: * wrt lock queue ordering and recovery: @@ -1592,9 +1790,33 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * relative to each other, but clearly *not* * preserved relative to locks from other nodes. */ + bad = 0; spin_lock(&res->spinlock); - dlm_lock_get(newlock); - list_add_tail(&newlock->list, queue); + list_for_each_entry(lock, queue, list) { + if (lock->ml.cookie == ml->cookie) { + u64 c = lock->ml.cookie; + mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " + "exists on this lockres!\n", dlm->name, + res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(c), + dlm_get_lock_cookie_seq(c)); + + mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " + "node=%u, cookie=%u:%llu, queue=%d\n", + ml->type, ml->convert_type, ml->node, + dlm_get_lock_cookie_node(ml->cookie), + dlm_get_lock_cookie_seq(ml->cookie), + ml->list); + + __dlm_print_one_lock_resource(res); + bad = 1; + break; + } + } + if (!bad) { + dlm_lock_get(newlock); + list_add_tail(&newlock->list, queue); + } spin_unlock(&res->spinlock); } mlog(0, "done running all the locks\n"); @@ -1618,8 +1840,14 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, struct dlm_lock *lock; res->state |= DLM_LOCK_RES_RECOVERING; - if (!list_empty(&res->recovering)) + if (!list_empty(&res->recovering)) { + mlog(0, + "Recovering res %s:%.*s, is already on recovery list!\n", + dlm->name, res->lockname.len, res->lockname.name); list_del_init(&res->recovering); + } + /* We need to hold a reference while on the recovery list */ + dlm_lockres_get(res); list_add_tail(&res->recovering, &dlm->reco.resources); /* find any pending locks and put them back on proper list */ @@ -1708,9 +1936,11 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); dlm_change_lockres_owner(dlm, res, new_master); res->state &= ~DLM_LOCK_RES_RECOVERING; - __dlm_dirty_lockres(dlm, res); + if (!__dlm_lockres_unused(res)) + __dlm_dirty_lockres(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); + dlm_lockres_put(res); } } @@ -1719,7 +1949,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, * the RECOVERING state and set the owner * if necessary */ for (i = 0; i < DLM_HASH_BUCKETS; i++) { - bucket = &(dlm->lockres_hash[i]); + bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry(res, hash_iter, bucket, hash_node) { if (res->state & DLM_LOCK_RES_RECOVERING) { if (res->owner == dead_node) { @@ -1743,11 +1973,13 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, dlm->name, res->lockname.len, res->lockname.name, res->owner); list_del_init(&res->recovering); + dlm_lockres_put(res); } spin_lock(&res->spinlock); dlm_change_lockres_owner(dlm, res, new_master); res->state &= ~DLM_LOCK_RES_RECOVERING; - __dlm_dirty_lockres(dlm, res); + if (!__dlm_lockres_unused(res)) + __dlm_dirty_lockres(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); } @@ -1884,7 +2116,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) * need to be fired as a result. 
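 * The loop below walks every bucket via dlm_lockres_hash() and
 * first prunes any $RECOVERY entries owned by the dead node to
 * avoid hangs during later recovery.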
*/ for (i = 0; i < DLM_HASH_BUCKETS; i++) { - bucket = &(dlm->lockres_hash[i]); + bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry(res, iter, bucket, hash_node) { /* always prune any $RECOVERY entries for dead nodes, * otherwise hangs can occur during later recovery */ @@ -1924,6 +2156,20 @@ static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) { assert_spin_locked(&dlm->spinlock); + if (dlm->reco.new_master == idx) { + mlog(0, "%s: recovery master %d just died\n", + dlm->name, idx); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + /* finalize1 was reached, so it is safe to clear + * the new_master and dead_node. that recovery + * is complete. */ + mlog(0, "%s: dead master %d had reached " + "finalize1 state, clearing\n", dlm->name, idx); + dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; + __dlm_reset_recovery(dlm); + } + } + /* check to see if the node is already considered dead */ if (!test_bit(idx, dlm->live_nodes_map)) { mlog(0, "for domain %s, node %d is already dead. " @@ -2039,7 +2285,8 @@ again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, - DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast); + DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, + dlm_reco_ast, dlm, dlm_reco_bast); mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", dlm->name, ret, lksb.status); @@ -2087,7 +2334,7 @@ again: /* set the new_master to this node */ spin_lock(&dlm->spinlock); - dlm->reco.new_master = dlm->node_num; + dlm_set_reco_master(dlm, dlm->node_num); spin_unlock(&dlm->spinlock); } @@ -2125,6 +2372,10 @@ again: mlog(0, "%s: reco master %u is ready to recover %u\n", dlm->name, dlm->reco.new_master, dlm->reco.dead_node); status = -EEXIST; + } else if (ret == DLM_RECOVERING) { + mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", + dlm->name, dlm->node_num); + goto again; } else { struct dlm_lock_resource *res; @@ -2156,7 +2407,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) mlog_entry("%u\n", dead_node); - mlog(0, "dead node is %u\n", dead_node); + mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); @@ -2214,6 +2465,14 @@ retry: * another ENOMEM */ msleep(100); goto retry; + } else if (ret == EAGAIN) { + mlog(0, "%s: trying to start recovery of node " + "%u, but node %u is waiting for last recovery " + "to complete, backoff for a bit\n", dlm->name, + dead_node, nodenum); + /* TODO Look into replacing msleep with cond_resched() */ + msleep(100); + goto retry; } } @@ -2229,8 +2488,20 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) if (!dlm_grab(dlm)) return 0; - mlog(0, "node %u wants to recover node %u\n", - br->node_idx, br->dead_node); + spin_lock(&dlm->spinlock); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + mlog(0, "%s: node %u wants to recover node %u (%u:%u) " + "but this node is in finalize state, waiting on finalize2\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); + spin_unlock(&dlm->spinlock); + return EAGAIN; + } + spin_unlock(&dlm->spinlock); + + mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); @@ -2252,8 +2523,8 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, 
br->dead_node); } - dlm->reco.new_master = br->node_idx; - dlm->reco.dead_node = br->dead_node; + dlm_set_reco_master(dlm, br->node_idx); + dlm_set_reco_dead_node(dlm, br->dead_node); if (!test_bit(br->dead_node, dlm->recovery_map)) { mlog(0, "recovery master %u sees %u as dead, but this " "node has not yet. marking %u as dead\n", @@ -2272,10 +2543,16 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) spin_unlock(&dlm->spinlock); dlm_kick_recovery_thread(dlm); + + mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); + dlm_put(dlm); return 0; } +#define DLM_FINALIZE_STAGE2 0x01 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) { int ret = 0; @@ -2283,25 +2560,31 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) struct dlm_node_iter iter; int nodenum; int status; + int stage = 1; - mlog(0, "finishing recovery for node %s:%u\n", - dlm->name, dlm->reco.dead_node); + mlog(0, "finishing recovery for node %s:%u, " + "stage %d\n", dlm->name, dlm->reco.dead_node, stage); spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); spin_unlock(&dlm->spinlock); +stage2: memset(&fr, 0, sizeof(fr)); fr.node_idx = dlm->node_num; fr.dead_node = dlm->reco.dead_node; + if (stage == 2) + fr.flags |= DLM_FINALIZE_STAGE2; while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { if (nodenum == dlm->node_num) continue; ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, &fr, sizeof(fr), nodenum, &status); - if (ret >= 0) { + if (ret >= 0) ret = status; + if (ret < 0) { + mlog_errno(ret); if (dlm_is_host_down(ret)) { /* this has no effect on this recovery * session, so set the status to zero to @@ -2309,13 +2592,17 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); ret = 0; + continue; } - } - if (ret < 0) { - mlog_errno(ret); break; } } + if (stage == 1) { + /* reset the node_iter back to the top and send finalize2 */ + iter.curnode = -1; + stage = 2; + goto stage2; + } return ret; } @@ -2324,14 +2611,19 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) { struct dlm_ctxt *dlm = data; struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; + int stage = 1; /* ok to return 0, domain has gone away */ if (!dlm_grab(dlm)) return 0; - mlog(0, "node %u finalizing recovery of node %u\n", - fr->node_idx, fr->dead_node); + if (fr->flags & DLM_FINALIZE_STAGE2) + stage = 2; + mlog(0, "%s: node %u finalizing recovery stage%d of " + "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, + fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { @@ -2347,13 +2639,41 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) BUG(); } - dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); - - spin_unlock(&dlm->spinlock); + switch (stage) { + case 1: + dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + mlog(ML_ERROR, "%s: received finalize1 from " + "new master %u for dead node %u, but " + "this node has already received it!\n", + dlm->name, fr->node_idx, fr->dead_node); + dlm_print_reco_node_status(dlm); + BUG(); + } + dlm->reco.state |= DLM_RECO_STATE_FINALIZE; + spin_unlock(&dlm->spinlock); + break; + case 2: + if (!(dlm->reco.state & 
DLM_RECO_STATE_FINALIZE)) { + mlog(ML_ERROR, "%s: received finalize2 from " + "new master %u for dead node %u, but " + "this node did not have finalize1!\n", + dlm->name, fr->node_idx, fr->dead_node); + dlm_print_reco_node_status(dlm); + BUG(); + } + dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; + spin_unlock(&dlm->spinlock); + dlm_reset_recovery(dlm); + dlm_kick_recovery_thread(dlm); + break; + default: + BUG(); + } - dlm_reset_recovery(dlm); + mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", + dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); - dlm_kick_recovery_thread(dlm); dlm_put(dlm); return 0; } diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 5be9d14f12c..0c822f3ffb0 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -39,6 +39,7 @@ #include <linux/inet.h> #include <linux/timer.h> #include <linux/kthread.h> +#include <linux/delay.h> #include "cluster/heartbeat.h" @@ -53,6 +54,8 @@ #include "cluster/masklog.h" static int dlm_thread(void *data); +static void dlm_purge_lockres_now(struct dlm_ctxt *dlm, + struct dlm_lock_resource *lockres); static void dlm_flush_asts(struct dlm_ctxt *dlm); @@ -80,7 +83,7 @@ repeat: } -static int __dlm_lockres_unused(struct dlm_lock_resource *res) +int __dlm_lockres_unused(struct dlm_lock_resource *res) { if (list_empty(&res->granted) && list_empty(&res->converting) && @@ -103,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (__dlm_lockres_unused(res)){ + /* For now, just keep any resource we master */ + if (res->owner == dlm->node_num) + { + if (!list_empty(&res->purge)) { + mlog(0, "we master %s:%.*s, but it is on " + "the purge list. Removing\n", + dlm->name, res->lockname.len, + res->lockname.name); + list_del_init(&res->purge); + dlm->purge_count--; + } + return; + } + if (list_empty(&res->purge)) { mlog(0, "putting lockres %.*s from purge list\n", res->lockname.len, res->lockname.name); @@ -110,10 +127,23 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, res->last_used = jiffies; list_add_tail(&res->purge, &dlm->purge_list); dlm->purge_count++; + + /* if this node is not the owner, there is + * no way to keep track of who the owner could be. + * unhash it to avoid serious problems. */ + if (res->owner != dlm->node_num) { + mlog(0, "%s:%.*s: doing immediate " + "purge of lockres owned by %u\n", + dlm->name, res->lockname.len, + res->lockname.name, res->owner); + + dlm_purge_lockres_now(dlm, res); + } } } else if (!list_empty(&res->purge)) { - mlog(0, "removing lockres %.*s from purge list\n", - res->lockname.len, res->lockname.name); + mlog(0, "removing lockres %.*s from purge list, " + "owner=%u\n", res->lockname.len, res->lockname.name, + res->owner); list_del_init(&res->purge); dlm->purge_count--; @@ -165,6 +195,7 @@ again: } else if (ret < 0) { mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n", lockres->lockname.len, lockres->lockname.name); + msleep(100); goto again; } @@ -178,6 +209,24 @@ finish: __dlm_unhash_lockres(lockres); } +/* make an unused lockres go away immediately. + * as soon as the dlm spinlock is dropped, this lockres + * will not be found. kfree still happens on last put. 
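+ *
+ * A hypothetical caller, making the locking contract explicit (take
+ * dlm->spinlock first, then the lockres spinlock, and hold both
+ * across the call):
+ *
+ *	spin_lock(&dlm->spinlock);
+ *	spin_lock(&res->spinlock);
+ *	if (__dlm_lockres_unused(res))
+ *		dlm_purge_lockres_now(dlm, res);
+ *	spin_unlock(&res->spinlock);
+ *	spin_unlock(&dlm->spinlock);
+ *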
*/ +static void dlm_purge_lockres_now(struct dlm_ctxt *dlm, + struct dlm_lock_resource *lockres) +{ + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&lockres->spinlock); + + BUG_ON(!__dlm_lockres_unused(lockres)); + + if (!list_empty(&lockres->purge)) { + list_del_init(&lockres->purge); + dlm->purge_count--; + } + __dlm_unhash_lockres(lockres); +} + static void dlm_run_purge_list(struct dlm_ctxt *dlm, int purge_now) { @@ -318,8 +367,7 @@ converting: target->ml.type = target->ml.convert_type; target->ml.convert_type = LKM_IVMODE; - list_del_init(&target->list); - list_add_tail(&target->list, &res->granted); + list_move_tail(&target->list, &res->granted); BUG_ON(!target->lksb); target->lksb->status = DLM_NORMAL; @@ -380,8 +428,7 @@ blocked: target->ml.type, target->ml.node); // target->ml.type is already correct - list_del_init(&target->list); - list_add_tail(&target->list, &res->granted); + list_move_tail(&target->list, &res->granted); BUG_ON(!target->lksb); target->lksb->status = DLM_NORMAL; @@ -422,6 +469,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) /* don't shuffle secondary queues */ if ((res->owner == dlm->node_num) && !(res->state & DLM_LOCK_RES_DIRTY)) { + /* ref for dirty_list */ + dlm_lockres_get(res); list_add_tail(&res->dirty, &dlm->dirty_list); res->state |= DLM_LOCK_RES_DIRTY; } @@ -606,6 +655,8 @@ static int dlm_thread(void *data) list_del_init(&res->dirty); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); + /* Drop dirty_list ref */ + dlm_lockres_put(res); /* lockres can be re-dirtied/re-added to the * dirty_list in this gap, but that is ok */ @@ -642,8 +693,9 @@ static int dlm_thread(void *data) * spinlock and do NOT have the dlm lock. * safe to reserve/queue asts and run the lists. */ - mlog(0, "calling dlm_shuffle_lists with dlm=%p, " - "res=%p\n", dlm, res); + mlog(0, "calling dlm_shuffle_lists with dlm=%s, " + "res=%.*s\n", dlm->name, + res->lockname.len, res->lockname.name); /* called while holding lockres lock */ dlm_shuffle_lists(dlm, res); @@ -657,6 +709,8 @@ in_progress: /* if the lock was in-progress, stick * it on the back of the list */ if (delay) { + /* ref for dirty_list */ + dlm_lockres_get(res); spin_lock(&res->spinlock); list_add_tail(&res->dirty, &dlm->dirty_list); res->state |= DLM_LOCK_RES_DIRTY; @@ -677,7 +731,7 @@ in_progress: /* yield and continue right away if there is more work to do */ if (!n) { - yield(); + cond_resched(); continue; } diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 7b1a2754267..37be4b2e0d4 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, else status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); - if (status != DLM_NORMAL) + if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) goto leave; /* By now this has been masked out of cancel requests. 
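 * With this patch DLM_CANCELGRANT no longer travels via lksb->status;
 * it comes back as the return value, is still visible to the unlock
 * AST, and only then gets folded away in dlmunlock() itself, roughly:
 *
 *	(*unlockast)(data, status);
 *	if (status == DLM_CANCELGRANT)
 *		status = DLM_NORMAL;
 *
 * so direct callers of dlmunlock() continue to see plain success.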
*/ @@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, spin_lock(&lock->spinlock); /* if the master told us the lock was already granted, * let the ast handle all of these actions */ - if (status == DLM_NORMAL && - lksb->status == DLM_CANCELGRANT) { + if (status == DLM_CANCELGRANT) { actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); @@ -271,8 +270,7 @@ void dlm_commit_pending_unlock(struct dlm_lock_resource *res, void dlm_commit_pending_cancel(struct dlm_lock_resource *res, struct dlm_lock *lock) { - list_del_init(&lock->list); - list_add_tail(&lock->list, &res->granted); + list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; } @@ -319,6 +317,16 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); + if (owner == dlm->node_num) { + /* ended up trying to contact ourself. this means + * that the lockres had been remote but became local + * via a migration. just retry it, now as local */ + mlog(0, "%s:%.*s: this node became the master due to a " + "migration, re-evaluate now\n", dlm->name, + res->lockname.len, res->lockname.name); + return DLM_FORWARD; + } + memset(&unlock, 0, sizeof(unlock)); unlock.node_idx = dlm->node_num; unlock.flags = cpu_to_be32(flags); @@ -340,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, vec, veclen, owner, &status); if (tmpret >= 0) { // successfully sent and received - if (status == DLM_CANCELGRANT) - ret = DLM_NORMAL; - else if (status == DLM_FORWARD) { + if (status == DLM_FORWARD) mlog(0, "master was in-progress. retry\n"); - ret = DLM_FORWARD; - } else - ret = status; - lksb->status = status; + ret = status; } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { @@ -363,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, /* something bad. this will BUG in ocfs2 */ ret = dlm_err_to_dlm_status(tmpret); } - lksb->status = ret; } return ret; @@ -474,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) /* lock was found on queue */ lksb = lock->lksb; + if (flags & (LKM_VALBLK|LKM_PUT_LVB) && + lock->ml.type != LKM_EXMODE) + flags &= ~(LKM_VALBLK|LKM_PUT_LVB); + /* unlockast only called on originating node */ if (flags & LKM_PUT_LVB) { lksb->flags |= DLM_LKSB_PUT_LVB; @@ -498,11 +504,8 @@ not_found: "cookie=%u:%llu\n", dlm_get_lock_cookie_node(unlock->cookie), dlm_get_lock_cookie_seq(unlock->cookie)); - else { - /* send the lksb->status back to the other node */ - status = lksb->status; + else dlm_lock_put(lock); - } leave: if (res) @@ -524,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, if (dlm_lock_on_list(&res->blocked, lock)) { /* cancel this outright */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK); } else if (dlm_lock_on_list(&res->converting, lock)) { /* cancel the request, put back on granted */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK | DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE); } else if (dlm_lock_on_list(&res->granted, lock)) { - /* too late, already granted. DLM_CANCELGRANT */ - lksb->status = DLM_CANCELGRANT; - status = DLM_NORMAL; + /* too late, already granted. 
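+ * the cancel raced with the granting AST and lost. Schematically,
+ * the three cancel outcomes are now:
+ *
+ *	on blocked:    DLM_NORMAL,      remove the lock, fire the ast
+ *	on converting: DLM_NORMAL,      restore to granted, fire the ast
+ *	on granted:    DLM_CANCELGRANT, lock stands, fire the ast only
+ *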
*/ + status = DLM_CANCELGRANT; *actions = DLM_UNLOCK_CALL_AST; } else { mlog(ML_ERROR, "lock to cancel is not on any list!\n"); - lksb->status = DLM_IVLOCKID; status = DLM_IVLOCKID; *actions = 0; } @@ -560,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, /* unlock request */ if (!dlm_lock_on_list(&res->granted, lock)) { - lksb->status = DLM_DENIED; status = DLM_DENIED; dlm_error(status); *actions = 0; } else { /* unlock granted lock */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_FREE_LOCK | DLM_UNLOCK_CALL_AST | @@ -623,6 +620,8 @@ retry: spin_lock(&res->spinlock); is_master = (res->owner == dlm->node_num); + if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) + flags &= ~LKM_VALBLK; spin_unlock(&res->spinlock); if (is_master) { @@ -656,7 +655,7 @@ retry: } if (call_ast) { - mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); + mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { /* it is possible that there is one last bast * pending. make sure it is flushed, then @@ -668,9 +667,12 @@ retry: wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } - (*unlockast)(data, lksb->status); + (*unlockast)(data, status); } + if (status == DLM_CANCELGRANT) + status = DLM_NORMAL; + if (status == DLM_NORMAL) { mlog(0, "kicking the thread\n"); dlm_kick_thread(dlm, res); diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c index 74ca4e5f976..eead48bbfac 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlm/userdlm.c @@ -102,10 +102,10 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) spin_unlock(&lockres->l_lock); } -#define user_log_dlm_error(_func, _stat, _lockres) do { \ - mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ - "resource %s: %s\n", dlm_errname(_stat), _func, \ - _lockres->l_name, dlm_errmsg(_stat)); \ +#define user_log_dlm_error(_func, _stat, _lockres) do { \ + mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ + "resource %.*s: %s\n", dlm_errname(_stat), _func, \ + _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \ } while (0) /* WARNING: This function lives in a world where the only three lock @@ -127,21 +127,22 @@ static void user_ast(void *opaque) struct user_lock_res *lockres = opaque; struct dlm_lockstatus *lksb; - mlog(0, "AST fired for lockres %s\n", lockres->l_name); + mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen, + lockres->l_name); spin_lock(&lockres->l_lock); lksb = &(lockres->l_lksb); if (lksb->status != DLM_NORMAL) { - mlog(ML_ERROR, "lksb status value of %u on lockres %s\n", - lksb->status, lockres->l_name); + mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", + lksb->status, lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); return; } mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE, - "Lockres %s, requested ivmode. flags 0x%x\n", - lockres->l_name, lockres->l_flags); + "Lockres %.*s, requested ivmode. flags 0x%x\n", + lockres->l_namelen, lockres->l_name, lockres->l_flags); /* we're downconverting. */ if (lockres->l_requested < lockres->l_level) { @@ -213,8 +214,8 @@ static void user_bast(void *opaque, int level) { struct user_lock_res *lockres = opaque; - mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n", - lockres->l_name, level); + mlog(0, "Blocking AST fired for lockres %.*s. 
Blocking level %d\n", + lockres->l_namelen, lockres->l_name, level); spin_lock(&lockres->l_lock); lockres->l_flags |= USER_LOCK_BLOCKED; @@ -231,7 +232,8 @@ static void user_unlock_ast(void *opaque, enum dlm_status status) { struct user_lock_res *lockres = opaque; - mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name); + mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen, + lockres->l_name); if (status != DLM_NORMAL && status != DLM_CANCELGRANT) mlog(ML_ERROR, "Dlm returns status %d\n", status); @@ -244,8 +246,6 @@ static void user_unlock_ast(void *opaque, enum dlm_status status) && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { lockres->l_level = LKM_IVMODE; } else if (status == DLM_CANCELGRANT) { - mlog(0, "Lock %s, cancel fails, flags 0x%x\n", - lockres->l_name, lockres->l_flags); /* We tried to cancel a convert request, but it was * already granted. Don't clear the busy flag - the * ast should've done this already. */ @@ -255,8 +255,6 @@ static void user_unlock_ast(void *opaque, enum dlm_status status) } else { BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); /* Cancel succeeded, we want to re-queue */ - mlog(0, "Lock %s, cancel succeeds, flags 0x%x\n", - lockres->l_name, lockres->l_flags); lockres->l_requested = LKM_IVMODE; /* cancel an * upconvert * request. */ @@ -287,13 +285,14 @@ static void user_dlm_unblock_lock(void *opaque) struct user_lock_res *lockres = (struct user_lock_res *) opaque; struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); - mlog(0, "processing lockres %s\n", lockres->l_name); + mlog(0, "processing lockres %.*s\n", lockres->l_namelen, + lockres->l_name); spin_lock(&lockres->l_lock); mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED), - "Lockres %s, flags 0x%x\n", - lockres->l_name, lockres->l_flags); + "Lockres %.*s, flags 0x%x\n", + lockres->l_namelen, lockres->l_name, lockres->l_flags); /* notice that we don't clear USER_LOCK_BLOCKED here. If it's * set, we want user_ast clear it. */ @@ -305,22 +304,16 @@ static void user_dlm_unblock_lock(void *opaque) * flag, and finally we might get another bast which re-queues * us before our ast for the downconvert is called. */ if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { - mlog(0, "Lockres %s, flags 0x%x: queued but not blocking\n", - lockres->l_name, lockres->l_flags); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { - mlog(0, "lock is in teardown so we do nothing\n"); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_BUSY) { - mlog(0, "Cancel lock %s, flags 0x%x\n", - lockres->l_name, lockres->l_flags); - if (lockres->l_flags & USER_LOCK_IN_CANCEL) { spin_unlock(&lockres->l_lock); goto drop_ref; @@ -372,6 +365,7 @@ static void user_dlm_unblock_lock(void *opaque) &lockres->l_lksb, LKM_CONVERT|LKM_VALBLK, lockres->l_name, + lockres->l_namelen, user_ast, lockres, user_bast); @@ -420,16 +414,16 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres, if (level != LKM_EXMODE && level != LKM_PRMODE) { - mlog(ML_ERROR, "lockres %s: invalid request!\n", - lockres->l_name); + mlog(ML_ERROR, "lockres %.*s: invalid request!\n", + lockres->l_namelen, lockres->l_name); status = -EINVAL; goto bail; } - mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n", - lockres->l_name, - (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE", - lkm_flags); + mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n", + lockres->l_namelen, lockres->l_name, + (level == LKM_EXMODE) ? 
"LKM_EXMODE" : "LKM_PRMODE", + lkm_flags); again: if (signal_pending(current)) { @@ -474,15 +468,13 @@ again: BUG_ON(level == LKM_IVMODE); BUG_ON(level == LKM_NLMODE); - mlog(0, "lock %s, get lock from %d to level = %d\n", - lockres->l_name, lockres->l_level, level); - /* call dlm_lock to upgrade lock now */ status = dlmlock(dlm, level, &lockres->l_lksb, local_flags, lockres->l_name, + lockres->l_namelen, user_ast, lockres, user_bast); @@ -498,9 +490,6 @@ again: goto bail; } - mlog(0, "lock %s, successfull return from dlmlock\n", - lockres->l_name); - user_wait_on_busy_lock(lockres); goto again; } @@ -508,9 +497,6 @@ again: user_dlm_inc_holders(lockres, level); spin_unlock(&lockres->l_lock); - mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name, - (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE"); - status = 0; bail: return status; @@ -538,13 +524,11 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres, { if (level != LKM_EXMODE && level != LKM_PRMODE) { - mlog(ML_ERROR, "lockres %s: invalid request!\n", lockres->l_name); + mlog(ML_ERROR, "lockres %.*s: invalid request!\n", + lockres->l_namelen, lockres->l_name); return; } - mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name, - (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE"); - spin_lock(&lockres->l_lock); user_dlm_dec_holders(lockres, level); __user_dlm_cond_queue_lockres(lockres); @@ -602,6 +586,7 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres, memcpy(lockres->l_name, dentry->d_name.name, dentry->d_name.len); + lockres->l_namelen = dentry->d_name.len; } int user_dlm_destroy_lock(struct user_lock_res *lockres) @@ -609,11 +594,10 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) int status = -EBUSY; struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); - mlog(0, "asked to destroy %s\n", lockres->l_name); + mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name); spin_lock(&lockres->l_lock); if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { - mlog(0, "Lock is already torn down\n"); spin_unlock(&lockres->l_lock); return 0; } @@ -623,8 +607,6 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) while (lockres->l_flags & USER_LOCK_BUSY) { spin_unlock(&lockres->l_lock); - mlog(0, "lock %s is busy\n", lockres->l_name); - user_wait_on_busy_lock(lockres); spin_lock(&lockres->l_lock); @@ -632,14 +614,12 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) if (lockres->l_ro_holders || lockres->l_ex_holders) { spin_unlock(&lockres->l_lock); - mlog(0, "lock %s has holders\n", lockres->l_name); goto bail; } status = 0; if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { spin_unlock(&lockres->l_lock); - mlog(0, "lock %s is not attached\n", lockres->l_name); goto bail; } @@ -647,7 +627,6 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) lockres->l_flags |= USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); - mlog(0, "unlocking lockres %s\n", lockres->l_name); status = dlmunlock(dlm, &lockres->l_lksb, LKM_VALBLK, @@ -672,7 +651,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name) u32 dlm_key; char *domain; - domain = kmalloc(name->len + 1, GFP_KERNEL); + domain = kmalloc(name->len + 1, GFP_NOFS); if (!domain) { mlog_errno(-ENOMEM); return ERR_PTR(-ENOMEM); diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h index 04178bc40b7..c400e93bbf7 100644 --- a/fs/ocfs2/dlm/userdlm.h +++ b/fs/ocfs2/dlm/userdlm.h @@ -53,6 +53,7 @@ struct user_lock_res { #define USER_DLM_LOCK_ID_MAX_LEN 32 char l_name[USER_DLM_LOCK_ID_MAX_LEN]; + int l_namelen; int l_level; 
unsigned int l_ro_holders; unsigned int l_ex_holders; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 64cd52860c8..8801e41afe8 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -46,6 +46,7 @@ #include "ocfs2.h" #include "alloc.h" +#include "dcache.h" #include "dlmglue.h" #include "extent_map.h" #include "heartbeat.h" @@ -66,78 +67,161 @@ struct ocfs2_mask_waiter { unsigned long mw_goal; }; -static void ocfs2_inode_ast_func(void *opaque); -static void ocfs2_inode_bast_func(void *opaque, - int level); -static void ocfs2_super_ast_func(void *opaque); -static void ocfs2_super_bast_func(void *opaque, - int level); -static void ocfs2_rename_ast_func(void *opaque); -static void ocfs2_rename_bast_func(void *opaque, - int level); - -/* so far, all locks have gotten along with the same unlock ast */ -static void ocfs2_unlock_ast_func(void *opaque, - enum dlm_status status); -static int ocfs2_do_unblock_meta(struct inode *inode, - int *requeue); -static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres, - int *requeue); -static int ocfs2_unblock_data(struct ocfs2_lock_res *lockres, - int *requeue); -static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres, - int *requeue); -static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres, - int *requeue); -typedef void (ocfs2_convert_worker_t)(struct ocfs2_lock_res *, int); -static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, - int *requeue, - ocfs2_convert_worker_t *worker); +static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); +static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres); +/* + * Return value from ->downconvert_worker functions. + * + * These control the precise actions of ocfs2_unblock_lock() + * and ocfs2_process_blocked_lock() + * + */ +enum ocfs2_unblock_action { + UNBLOCK_CONTINUE = 0, /* Continue downconvert */ + UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire + * ->post_unlock callback */ + UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire + * ->post_unlock() callback. */ +}; + +struct ocfs2_unblock_ctl { + int requeue; + enum ocfs2_unblock_action unblock_action; +}; + +static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, + int new_level); +static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres); + +static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, + int blocking); + +static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, + int blocking); + +static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); + +/* + * OCFS2 Lock Resource Operations + * + * These fine tune the behavior of the generic dlmglue locking infrastructure. + * + * The most basic of lock types can point ->l_priv to their respective + * struct ocfs2_super and allow the default actions to manage things. + * + * Right now, each lock type also needs to implement an init function, + * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres() + * should be called when the lock is no longer needed (i.e., object + * destruction time). + */ struct ocfs2_lock_res_ops { - void (*ast)(void *); - void (*bast)(void *, int); - void (*unlock_ast)(void *, enum dlm_status); - int (*unblock)(struct ocfs2_lock_res *, int *); + /* + * Translate an ocfs2_lock_res * into an ocfs2_super *. 
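+ * For example, the inode lock types below resolve it from their
+ * l_priv inode:
+ *
+ *	static struct ocfs2_super *
+ *	ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
+ *	{
+ *		struct inode *inode = ocfs2_lock_res_inode(lockres);
+ *
+ *		return OCFS2_SB(inode->i_sb);
+ *	}
+ *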
Define + * this callback if ->l_priv is not an ocfs2_super pointer + */ + struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *); + + /* + * Optionally called in the downconvert (or "vote") thread + * after a successful downconvert. The lockres will not be + * referenced after this callback is called, so it is safe to + * free memory, etc. + * + * The exact semantics of when this is called are controlled + * by ->downconvert_worker() + */ + void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *); + + /* + * Allow a lock type to add checks to determine whether it is + * safe to downconvert a lock. Return 0 to re-queue the + * downconvert at a later time, nonzero to continue. + * + * For most locks, the default checks that there are no + * incompatible holders are sufficient. + * + * Called with the lockres spinlock held. + */ + int (*check_downconvert)(struct ocfs2_lock_res *, int); + + /* + * Allows a lock type to populate the lock value block. This + * is called on downconvert, and when we drop a lock. + * + * Locks that want to use this should set LOCK_TYPE_USES_LVB + * in the flags field. + * + * Called with the lockres spinlock held. + */ + void (*set_lvb)(struct ocfs2_lock_res *); + + /* + * Called from the downconvert thread when it is determined + * that a lock will be downconverted. This is called without + * any locks held so the function can do work that might + * schedule (syncing out data, etc). + * + * This should return any one of the ocfs2_unblock_action + * values, depending on what it wants the thread to do. + */ + int (*downconvert_worker)(struct ocfs2_lock_res *, int); + + /* + * LOCK_TYPE_* flags which describe the specific requirements + * of a lock type. Descriptions of each individual flag follow. + */ + int flags; }; +/* + * Some locks want to "refresh" potentially stale data when a + * meaningful (PRMODE or EXMODE) lock level is first obtained. If this + * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the + * individual lockres l_flags member from the ast function. It is + * expected that the locking wrapper will clear the + * OCFS2_LOCK_NEEDS_REFRESH flag when done. + */ +#define LOCK_TYPE_REQUIRES_REFRESH 0x1 + +/* + * Indicate that a lock type makes use of the lock value block. The + * ->set_lvb lock type callback must be defined. 
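+ * The flag is consumed later in this patch when the dlmlock() and
+ * dlmunlock() flag words are built:
+ *
+ *	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ *		lkm_flags |= LKM_VALBLK;
+ *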
+ */ +#define LOCK_TYPE_USES_LVB 0x2 + static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = { - .ast = ocfs2_inode_ast_func, - .bast = ocfs2_inode_bast_func, - .unlock_ast = ocfs2_unlock_ast_func, - .unblock = ocfs2_unblock_inode_lock, + .get_osb = ocfs2_get_inode_osb, + .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = { - .ast = ocfs2_inode_ast_func, - .bast = ocfs2_inode_bast_func, - .unlock_ast = ocfs2_unlock_ast_func, - .unblock = ocfs2_unblock_meta, + .get_osb = ocfs2_get_inode_osb, + .check_downconvert = ocfs2_check_meta_downconvert, + .set_lvb = ocfs2_set_meta_lvb, + .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, }; -static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, - int blocking); - static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = { - .ast = ocfs2_inode_ast_func, - .bast = ocfs2_inode_bast_func, - .unlock_ast = ocfs2_unlock_ast_func, - .unblock = ocfs2_unblock_data, + .get_osb = ocfs2_get_inode_osb, + .downconvert_worker = ocfs2_data_convert_worker, + .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_super_lops = { - .ast = ocfs2_super_ast_func, - .bast = ocfs2_super_bast_func, - .unlock_ast = ocfs2_unlock_ast_func, - .unblock = ocfs2_unblock_osb_lock, + .flags = LOCK_TYPE_REQUIRES_REFRESH, }; static struct ocfs2_lock_res_ops ocfs2_rename_lops = { - .ast = ocfs2_rename_ast_func, - .bast = ocfs2_rename_bast_func, - .unlock_ast = ocfs2_unlock_ast_func, - .unblock = ocfs2_unblock_osb_lock, + .flags = 0, +}; + +static struct ocfs2_lock_res_ops ocfs2_dentry_lops = { + .get_osb = ocfs2_get_dentry_osb, + .post_unlock = ocfs2_dentry_post_unlock, + .downconvert_worker = ocfs2_dentry_convert_worker, + .flags = 0, }; static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) @@ -147,29 +231,26 @@ static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) lockres->l_type == OCFS2_LOCK_TYPE_RW; } -static inline int ocfs2_is_super_lock(struct ocfs2_lock_res *lockres) +static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) { - return lockres->l_type == OCFS2_LOCK_TYPE_SUPER; -} + BUG_ON(!ocfs2_is_inode_lock(lockres)); -static inline int ocfs2_is_rename_lock(struct ocfs2_lock_res *lockres) -{ - return lockres->l_type == OCFS2_LOCK_TYPE_RENAME; + return (struct inode *) lockres->l_priv; } -static inline struct ocfs2_super *ocfs2_lock_res_super(struct ocfs2_lock_res *lockres) +static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres) { - BUG_ON(!ocfs2_is_super_lock(lockres) - && !ocfs2_is_rename_lock(lockres)); + BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY); - return (struct ocfs2_super *) lockres->l_priv; + return (struct ocfs2_dentry_lock *)lockres->l_priv; } -static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) +static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres) { - BUG_ON(!ocfs2_is_inode_lock(lockres)); + if (lockres->l_ops->get_osb) + return lockres->l_ops->get_osb(lockres); - return (struct inode *) lockres->l_priv; + return (struct ocfs2_super *)lockres->l_priv; } static int ocfs2_lock_create(struct ocfs2_super *osb, @@ -200,25 +281,6 @@ static int ocfs2_meta_lock_update(struct inode *inode, struct buffer_head **bh); static void ocfs2_drop_osb_locks(struct ocfs2_super *osb); static inline int ocfs2_highest_compat_lock_level(int level); -static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode, - struct ocfs2_lock_res *lockres, - int new_level); - -static char 
*ocfs2_lock_type_strings[] = { - [OCFS2_LOCK_TYPE_META] = "Meta", - [OCFS2_LOCK_TYPE_DATA] = "Data", - [OCFS2_LOCK_TYPE_SUPER] = "Super", - [OCFS2_LOCK_TYPE_RENAME] = "Rename", - /* Need to differntiate from [R]ename.. serializing writes is the - * important job it does, anyway. */ - [OCFS2_LOCK_TYPE_RW] = "Write/Read", -}; - -static char *ocfs2_lock_type_string(enum ocfs2_lock_type type) -{ - mlog_bug_on_msg(type >= OCFS2_NUM_LOCK_TYPES, "%d\n", type); - return ocfs2_lock_type_strings[type]; -} static void ocfs2_build_lock_name(enum ocfs2_lock_type type, u64 blkno, @@ -242,7 +304,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type, mlog_exit_void(); } -static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, struct ocfs2_dlm_debug *dlm_debug) @@ -265,13 +327,9 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res) static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, struct ocfs2_lock_res *res, enum ocfs2_lock_type type, - u64 blkno, - u32 generation, struct ocfs2_lock_res_ops *ops, void *priv) { - ocfs2_build_lock_name(type, blkno, generation, res->l_name); - res->l_type = type; res->l_ops = ops; res->l_priv = priv; @@ -299,6 +357,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, enum ocfs2_lock_type type, + unsigned int generation, struct inode *inode) { struct ocfs2_lock_res_ops *ops; @@ -319,9 +378,73 @@ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, break; }; - ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, - OCFS2_I(inode)->ip_blkno, - inode->i_generation, ops, inode); + ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno, + generation, res->l_name); + ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode); +} + +static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres) +{ + struct inode *inode = ocfs2_lock_res_inode(lockres); + + return OCFS2_SB(inode->i_sb); +} + +static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres) +{ + __be64 inode_blkno_be; + + memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], + sizeof(__be64)); + + return be64_to_cpu(inode_blkno_be); +} + +static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres) +{ + struct ocfs2_dentry_lock *dl = lockres->l_priv; + + return OCFS2_SB(dl->dl_inode->i_sb); +} + +void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, + u64 parent, struct inode *inode) +{ + int len; + u64 inode_blkno = OCFS2_I(inode)->ip_blkno; + __be64 inode_blkno_be = cpu_to_be64(inode_blkno); + struct ocfs2_lock_res *lockres = &dl->dl_lockres; + + ocfs2_lock_res_init_once(lockres); + + /* + * Unfortunately, the standard lock naming scheme won't work + * here because we have two 16 byte values to use. Instead, + * we'll stuff the inode number as a binary value. We still + * want error prints to show something without garbling the + * display, so drop a null byte in there before the inode + * number. A future version of OCFS2 will likely use all + * binary lock names. The stringified names have been a + * tremendous aid in debugging, but now that the debugfs + * interface exists, we can mangle things there if need be. 
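+ *
+ * With OCFS2_DENTRY_LOCK_INO_START equal to 18 (implied by the
+ * BUG_ON() below), the resulting layout is, schematically:
+ *
+ *	l_name[0]      lock type character
+ *	l_name[1..16]  parent blkno as sixteen hex digits
+ *	l_name[17]     the NUL written by snprintf()
+ *	l_name[18..25] inode blkno as a raw big-endian u64
+ *
+ * ocfs2_get_dentry_lock_ino() above just memcpy()s those trailing
+ * eight bytes back out and byte-swaps them.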
+ * + * NOTE: We also drop the standard "pad" value (the total lock + * name size stays the same though - the last part is all + * zeros due to the memset in ocfs2_lock_res_init_once() + */ + len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START, + "%c%016llx", + ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY), + (long long)parent); + + BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1)); + + memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be, + sizeof(__be64)); + + ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres, + OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops, + dl); } static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res, @@ -330,8 +453,9 @@ static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res, /* Superblock lockres doesn't come from a slab so we call init * once on it manually. */ ocfs2_lock_res_init_once(res); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO, + 0, res->l_name); ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER, - OCFS2_SUPER_BLOCK_BLKNO, 0, &ocfs2_super_lops, osb); } @@ -341,7 +465,8 @@ static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res, /* Rename lockres doesn't come from a slab so we call init * once on it manually. */ ocfs2_lock_res_init_once(res); - ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, 0, 0, + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name); + ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, &ocfs2_rename_lops, osb); } @@ -495,7 +620,8 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo * information is already up to data. Convert from NL to * *anything* however should mark ourselves as needing an * update */ - if (lockres->l_level == LKM_NLMODE) + if (lockres->l_level == LKM_NLMODE && + lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; @@ -512,7 +638,8 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); if (lockres->l_requested > LKM_NLMODE && - !(lockres->l_flags & OCFS2_LOCK_LOCAL)) + !(lockres->l_flags & OCFS2_LOCK_LOCAL) && + lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; @@ -522,68 +649,6 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc mlog_exit_void(); } -static void ocfs2_inode_ast_func(void *opaque) -{ - struct ocfs2_lock_res *lockres = opaque; - struct inode *inode; - struct dlm_lockstatus *lksb; - unsigned long flags; - - mlog_entry_void(); - - inode = ocfs2_lock_res_inode(lockres); - - mlog(0, "AST fired for inode %llu, l_action = %u, type = %s\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, lockres->l_action, - ocfs2_lock_type_string(lockres->l_type)); - - BUG_ON(!ocfs2_is_inode_lock(lockres)); - - spin_lock_irqsave(&lockres->l_lock, flags); - - lksb = &(lockres->l_lksb); - if (lksb->status != DLM_NORMAL) { - mlog(ML_ERROR, "ocfs2_inode_ast_func: lksb status value of %u " - "on inode %llu\n", lksb->status, - (unsigned long long)OCFS2_I(inode)->ip_blkno); - spin_unlock_irqrestore(&lockres->l_lock, flags); - mlog_exit_void(); - return; - } - - switch(lockres->l_action) { - case OCFS2_AST_ATTACH: - ocfs2_generic_handle_attach_action(lockres); - lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL); - break; - case OCFS2_AST_CONVERT: - 
ocfs2_generic_handle_convert_action(lockres); - break; - case OCFS2_AST_DOWNCONVERT: - ocfs2_generic_handle_downconvert_action(lockres); - break; - default: - mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u " - "lockres flags = 0x%lx, unlock action: %u\n", - lockres->l_name, lockres->l_action, lockres->l_flags, - lockres->l_unlock_action); - - BUG(); - } - - /* data and rw locking ignores refresh flag for now. */ - if (lockres->l_type != OCFS2_LOCK_TYPE_META) - lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); - - /* set it to something invalid so if we get called again we - * can catch it. */ - lockres->l_action = OCFS2_AST_INVALID; - spin_unlock_irqrestore(&lockres->l_lock, flags); - wake_up(&lockres->l_event); - - mlog_exit_void(); -} - static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level) { @@ -610,54 +675,33 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, return needs_downconvert; } -static void ocfs2_generic_bast_func(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, - int level) +static void ocfs2_blocking_ast(void *opaque, int level) { + struct ocfs2_lock_res *lockres = opaque; + struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); int needs_downconvert; unsigned long flags; - mlog_entry_void(); - BUG_ON(level <= LKM_NLMODE); + mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n", + lockres->l_name, level, lockres->l_level, + ocfs2_lock_type_string(lockres->l_type)); + spin_lock_irqsave(&lockres->l_lock, flags); needs_downconvert = ocfs2_generic_handle_bast(lockres, level); if (needs_downconvert) ocfs2_schedule_blocked_lock(osb, lockres); spin_unlock_irqrestore(&lockres->l_lock, flags); - ocfs2_kick_vote_thread(osb); - wake_up(&lockres->l_event); - mlog_exit_void(); -} - -static void ocfs2_inode_bast_func(void *opaque, int level) -{ - struct ocfs2_lock_res *lockres = opaque; - struct inode *inode; - struct ocfs2_super *osb; - - mlog_entry_void(); - - BUG_ON(!ocfs2_is_inode_lock(lockres)); - inode = ocfs2_lock_res_inode(lockres); - osb = OCFS2_SB(inode->i_sb); - - mlog(0, "BAST fired for inode %llu, blocking %d, level %d type %s\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, level, - lockres->l_level, ocfs2_lock_type_string(lockres->l_type)); - - ocfs2_generic_bast_func(osb, lockres, level); - - mlog_exit_void(); + ocfs2_kick_vote_thread(osb); } -static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres, - int ignore_refresh) +static void ocfs2_locking_ast(void *opaque) { + struct ocfs2_lock_res *lockres = opaque; struct dlm_lockstatus *lksb = &lockres->l_lksb; unsigned long flags; @@ -673,6 +717,7 @@ static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres, switch(lockres->l_action) { case OCFS2_AST_ATTACH: ocfs2_generic_handle_attach_action(lockres); + lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL); break; case OCFS2_AST_CONVERT: ocfs2_generic_handle_convert_action(lockres); @@ -681,80 +726,19 @@ static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres, ocfs2_generic_handle_downconvert_action(lockres); break; default: + mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u " + "lockres flags = 0x%lx, unlock action: %u\n", + lockres->l_name, lockres->l_action, lockres->l_flags, + lockres->l_unlock_action); BUG(); } - if (ignore_refresh) - lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); - /* set it to something invalid so if we get called again we * can catch it. 
*/ lockres->l_action = OCFS2_AST_INVALID; - spin_unlock_irqrestore(&lockres->l_lock, flags); wake_up(&lockres->l_event); -} - -static void ocfs2_super_ast_func(void *opaque) -{ - struct ocfs2_lock_res *lockres = opaque; - - mlog_entry_void(); - mlog(0, "Superblock AST fired\n"); - - BUG_ON(!ocfs2_is_super_lock(lockres)); - ocfs2_generic_ast_func(lockres, 0); - - mlog_exit_void(); -} - -static void ocfs2_super_bast_func(void *opaque, - int level) -{ - struct ocfs2_lock_res *lockres = opaque; - struct ocfs2_super *osb; - - mlog_entry_void(); - mlog(0, "Superblock BAST fired\n"); - - BUG_ON(!ocfs2_is_super_lock(lockres)); - osb = ocfs2_lock_res_super(lockres); - ocfs2_generic_bast_func(osb, lockres, level); - - mlog_exit_void(); -} - -static void ocfs2_rename_ast_func(void *opaque) -{ - struct ocfs2_lock_res *lockres = opaque; - - mlog_entry_void(); - - mlog(0, "Rename AST fired\n"); - - BUG_ON(!ocfs2_is_rename_lock(lockres)); - - ocfs2_generic_ast_func(lockres, 1); - - mlog_exit_void(); -} - -static void ocfs2_rename_bast_func(void *opaque, - int level) -{ - struct ocfs2_lock_res *lockres = opaque; - struct ocfs2_super *osb; - - mlog_entry_void(); - - mlog(0, "Rename BAST fired\n"); - - BUG_ON(!ocfs2_is_rename_lock(lockres)); - - osb = ocfs2_lock_res_super(lockres); - ocfs2_generic_bast_func(osb, lockres, level); - - mlog_exit_void(); + spin_unlock_irqrestore(&lockres->l_lock, flags); } static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, @@ -810,9 +794,10 @@ static int ocfs2_lock_create(struct ocfs2_super *osb, &lockres->l_lksb, dlm_flags, lockres->l_name, - lockres->l_ops->ast, + OCFS2_LOCK_ID_MAX_LEN - 1, + ocfs2_locking_ast, lockres, - lockres->l_ops->bast); + ocfs2_blocking_ast); if (status != DLM_NORMAL) { ocfs2_log_dlm_error("dlmlock", status, lockres); ret = -EINVAL; @@ -930,6 +915,9 @@ static int ocfs2_cluster_lock(struct ocfs2_super *osb, ocfs2_init_mask_waiter(&mw); + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lkm_flags |= LKM_VALBLK; + again: wait = 0; @@ -997,11 +985,12 @@ again: status = dlmlock(osb->dlm, level, &lockres->l_lksb, - lkm_flags|LKM_CONVERT|LKM_VALBLK, + lkm_flags|LKM_CONVERT, lockres->l_name, - lockres->l_ops->ast, + OCFS2_LOCK_ID_MAX_LEN - 1, + ocfs2_locking_ast, lockres, - lockres->l_ops->bast); + ocfs2_blocking_ast); if (status != DLM_NORMAL) { if ((lkm_flags & LKM_NOQUEUE) && (status == DLM_NOTQUEUED)) @@ -1074,18 +1063,21 @@ static void ocfs2_cluster_unlock(struct ocfs2_super *osb, mlog_exit_void(); } -static int ocfs2_create_new_inode_lock(struct inode *inode, - struct ocfs2_lock_res *lockres) +int ocfs2_create_new_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int ex, + int local) { - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int level = ex ? LKM_EXMODE : LKM_PRMODE; unsigned long flags; + int lkm_flags = local ? 
LKM_LOCAL : 0; spin_lock_irqsave(&lockres->l_lock, flags); BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); lockres_or_flags(lockres, OCFS2_LOCK_LOCAL); spin_unlock_irqrestore(&lockres->l_lock, flags); - return ocfs2_lock_create(osb, lockres, LKM_EXMODE, LKM_LOCAL); + return ocfs2_lock_create(osb, lockres, level, lkm_flags); } /* Grants us an EX lock on the data and metadata resources, skipping @@ -1097,6 +1089,7 @@ static int ocfs2_create_new_inode_lock(struct inode *inode, int ocfs2_create_new_inode_locks(struct inode *inode) { int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); BUG_ON(!inode); BUG_ON(!ocfs2_inode_is_new(inode)); @@ -1113,22 +1106,23 @@ int ocfs2_create_new_inode_locks(struct inode *inode) * on a resource which has an invalid one -- we'll set it * valid when we release the EX. */ - ret = ocfs2_create_new_inode_lock(inode, - &OCFS2_I(inode)->ip_rw_lockres); + ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1); if (ret) { mlog_errno(ret); goto bail; } - ret = ocfs2_create_new_inode_lock(inode, - &OCFS2_I(inode)->ip_meta_lockres); + /* + * We don't want to use LKM_LOCAL on a meta data lock as they + * don't use a generation in their lock names. + */ + ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0); if (ret) { mlog_errno(ret); goto bail; } - ret = ocfs2_create_new_inode_lock(inode, - &OCFS2_I(inode)->ip_data_lockres); + ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1); if (ret) { mlog_errno(ret); goto bail; @@ -1317,7 +1311,17 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb; - lvb->lvb_version = cpu_to_be32(OCFS2_LVB_VERSION); + /* + * Invalidate the LVB of a deleted inode - this way other + * nodes are forced to go to disk and discover the new inode + * status. 
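+ *
+ * The reader side enforces this: ocfs2_meta_lvb_is_trustable(),
+ * updated below, only believes the block when
+ *
+ *	lvb->lvb_version == OCFS2_LVB_VERSION &&
+ *	be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation
+ *
+ * holds, so a zeroed version, or a generation left over from a
+ * previous incarnation of the inode, forces a refresh from disk.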
+ */ + if (oi->ip_flags & OCFS2_INODE_DELETED) { + lvb->lvb_version = 0; + goto out; + } + + lvb->lvb_version = OCFS2_LVB_VERSION; lvb->lvb_isize = cpu_to_be64(i_size_read(inode)); lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters); lvb->lvb_iuid = cpu_to_be32(inode->i_uid); @@ -1330,7 +1334,10 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime)); lvb->lvb_imtime_packed = cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime)); + lvb->lvb_iattr = cpu_to_be32(oi->ip_attr); + lvb->lvb_igeneration = cpu_to_be32(inode->i_generation); +out: mlog_meta_lvb(0, lockres); mlog_exit_void(); @@ -1360,6 +1367,9 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters); i_size_write(inode, be64_to_cpu(lvb->lvb_isize)); + oi->ip_attr = be32_to_cpu(lvb->lvb_iattr); + ocfs2_set_inode_flags(inode); + /* fast-symlinks are a special case */ if (S_ISLNK(inode->i_mode) && !oi->ip_clusters) inode->i_blocks = 0; @@ -1382,11 +1392,13 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) mlog_exit_void(); } -static inline int ocfs2_meta_lvb_is_trustable(struct ocfs2_lock_res *lockres) +static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, + struct ocfs2_lock_res *lockres) { struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb; - if (be32_to_cpu(lvb->lvb_version) == OCFS2_LVB_VERSION) + if (lvb->lvb_version == OCFS2_LVB_VERSION + && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation) return 1; return 0; } @@ -1483,7 +1495,7 @@ static int ocfs2_meta_lock_update(struct inode *inode, * map (directories, bitmap files, etc) */ ocfs2_extent_map_trunc(inode, 0); - if (ocfs2_meta_lvb_is_trustable(lockres)) { + if (ocfs2_meta_lvb_is_trustable(inode, lockres)) { mlog(0, "Trusting LVB on inode %llu\n", (unsigned long long)oi->ip_blkno); ocfs2_refresh_inode_from_lvb(inode); @@ -1624,6 +1636,18 @@ int ocfs2_meta_lock_full(struct inode *inode, wait_event(osb->recovery_event, ocfs2_node_map_is_empty(osb, &osb->recovery_map)); + /* + * We only see this flag if we're being called from + * ocfs2_read_locked_inode(). It means we're locking an inode + * which hasn't been populated yet, so clear the refresh flag + * and let the caller handle it. + */ + if (inode->i_state & I_NEW) { + status = 0; + ocfs2_complete_lock_res_refresh(lockres, 0); + goto bail; + } + /* This is fun. The caller may want a bh back, or it may * not. ocfs2_meta_lock_update definitely wants one in, but * may or may not read one, depending on what's in the @@ -1803,6 +1827,34 @@ void ocfs2_rename_unlock(struct ocfs2_super *osb) ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE); } +int ocfs2_dentry_lock(struct dentry *dentry, int ex) +{ + int ret; + int level = ex ? LKM_EXMODE : LKM_PRMODE; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + + BUG_ON(!dl); + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0); + if (ret < 0) + mlog_errno(ret); + + return ret; +} + +void ocfs2_dentry_unlock(struct dentry *dentry, int ex) +{ + int level = ex ? LKM_EXMODE : LKM_PRMODE; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + + ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); +} + /* Reference counting of the dlm debug structure. 
We want this because * open references on the debug inodes can live on after a mount, so * we can't rely on the ocfs2_super to always exist. */ @@ -1933,9 +1985,16 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) if (!lockres) return -EINVAL; - seq_printf(m, "0x%x\t" - "%.*s\t" - "%d\t" + seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION); + + if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY) + seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1, + lockres->l_name, + (unsigned int)ocfs2_get_dentry_lock_ino(lockres)); + else + seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name); + + seq_printf(m, "%d\t" "0x%lx\t" "0x%x\t" "0x%x\t" @@ -1943,8 +2002,6 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) "%u\t" "%d\t" "%d\t", - OCFS2_DLM_DEBUG_STR_VERSION, - OCFS2_LOCK_ID_MAX_LEN, lockres->l_name, lockres->l_level, lockres->l_flags, lockres->l_action, @@ -1995,7 +2052,7 @@ static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file) mlog_errno(ret); goto out; } - osb = (struct ocfs2_super *) inode->u.generic_ip; + osb = inode->i_private; ocfs2_get_dlm_debug(osb->osb_dlm_debug); priv->p_dlm_debug = osb->osb_dlm_debug; INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list); @@ -2071,8 +2128,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) } /* launch vote thread */ - osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote-%d", - osb->osb_id); + osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote"); if (IS_ERR(osb->vote_task)) { status = PTR_ERR(osb->vote_task); osb->vote_task = NULL; @@ -2135,7 +2191,7 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb) mlog_exit_void(); } -static void ocfs2_unlock_ast_func(void *opaque, enum dlm_status status) +static void ocfs2_unlock_ast(void *opaque, enum dlm_status status) { struct ocfs2_lock_res *lockres = opaque; unsigned long flags; @@ -2191,24 +2247,20 @@ complete_unlock: mlog_exit_void(); } -typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *); - -struct drop_lock_cb { - ocfs2_pre_drop_cb_t *drop_func; - void *drop_data; -}; - static int ocfs2_drop_lock(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, - struct drop_lock_cb *dcb) + struct ocfs2_lock_res *lockres) { enum dlm_status status; unsigned long flags; + int lkm_flags = 0; /* We didn't get anywhere near actually using this lockres. 
*/ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) goto out; + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lkm_flags |= LKM_VALBLK; + spin_lock_irqsave(&lockres->l_lock, flags); mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING), @@ -2231,8 +2283,12 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); } - if (dcb) - dcb->drop_func(lockres, dcb->drop_data); + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) { + if (lockres->l_flags & OCFS2_LOCK_ATTACHED && + lockres->l_level == LKM_EXMODE && + !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) + lockres->l_ops->set_lvb(lockres); + } if (lockres->l_flags & OCFS2_LOCK_BUSY) mlog(ML_ERROR, "destroying busy lock: \"%s\"\n", @@ -2258,8 +2314,8 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, mlog(0, "lock %s\n", lockres->l_name); - status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_VALBLK, - lockres->l_ops->unlock_ast, lockres); + status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags, + ocfs2_unlock_ast, lockres); if (status != DLM_NORMAL) { ocfs2_log_dlm_error("dlmunlock", status, lockres); mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags); @@ -2306,43 +2362,26 @@ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) spin_unlock_irqrestore(&lockres->l_lock, flags); } -static void ocfs2_drop_osb_locks(struct ocfs2_super *osb) +void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) { - int status; - - mlog_entry_void(); - - ocfs2_mark_lockres_freeing(&osb->osb_super_lockres); - - status = ocfs2_drop_lock(osb, &osb->osb_super_lockres, NULL); - if (status < 0) - mlog_errno(status); - - ocfs2_mark_lockres_freeing(&osb->osb_rename_lockres); - - status = ocfs2_drop_lock(osb, &osb->osb_rename_lockres, NULL); - if (status < 0) - mlog_errno(status); + int ret; - mlog_exit(status); + ocfs2_mark_lockres_freeing(lockres); + ret = ocfs2_drop_lock(osb, lockres); + if (ret) + mlog_errno(ret); } -static void ocfs2_meta_pre_drop(struct ocfs2_lock_res *lockres, void *data) +static void ocfs2_drop_osb_locks(struct ocfs2_super *osb) { - struct inode *inode = data; - - /* the metadata lock requires a bit more work as we have an - * LVB to worry about. */ - if (lockres->l_flags & OCFS2_LOCK_ATTACHED && - lockres->l_level == LKM_EXMODE && - !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) - __ocfs2_stuff_meta_lvb(inode); + ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres); + ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres); } int ocfs2_drop_inode_locks(struct inode *inode) { int status, err; - struct drop_lock_cb meta_dcb = { ocfs2_meta_pre_drop, inode, }; mlog_entry_void(); @@ -2350,24 +2389,21 @@ int ocfs2_drop_inode_locks(struct inode *inode) * ocfs2_clear_inode has done it for us. 
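 *
 * (Editor's aside: with the drop_lock_cb plumbing removed, the LVB
 * stuffing that the old meta_dcb callback did is now performed
 * inside ocfs2_drop_lock() itself, keyed off LOCK_TYPE_USES_LVB in
 * the lock type's ops. Every caller therefore reduces to the two
 * calls that ocfs2_simple_drop_lockres() wraps above:
 *
 *	ocfs2_mark_lockres_freeing(lockres);
 *	ret = ocfs2_drop_lock(osb, lockres);
 *
 * and the meta lock no longer needs special-casing at drop time.)
 *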
*/ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), - &OCFS2_I(inode)->ip_data_lockres, - NULL); + &OCFS2_I(inode)->ip_data_lockres); if (err < 0) mlog_errno(err); status = err; err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), - &OCFS2_I(inode)->ip_meta_lockres, - &meta_dcb); + &OCFS2_I(inode)->ip_meta_lockres); if (err < 0) mlog_errno(err); if (err < 0 && !status) status = err; err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), - &OCFS2_I(inode)->ip_rw_lockres, - NULL); + &OCFS2_I(inode)->ip_rw_lockres); if (err < 0) mlog_errno(err); if (err < 0 && !status) @@ -2416,9 +2452,10 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, &lockres->l_lksb, dlm_flags, lockres->l_name, - lockres->l_ops->ast, + OCFS2_LOCK_ID_MAX_LEN - 1, + ocfs2_locking_ast, lockres, - lockres->l_ops->bast); + ocfs2_blocking_ast); if (status != DLM_NORMAL) { ocfs2_log_dlm_error("dlmlock", status, lockres); ret = -EINVAL; @@ -2477,7 +2514,7 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb, status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_CANCEL, - lockres->l_ops->unlock_ast, + ocfs2_unlock_ast, lockres); if (status != DLM_NORMAL) { ocfs2_log_dlm_error("dlmunlock", status, lockres); @@ -2491,115 +2528,15 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb, return ret; } -static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode, - struct ocfs2_lock_res *lockres, - int new_level) -{ - int ret; - - mlog_entry_void(); - - BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE); - - if (lockres->l_flags & OCFS2_LOCK_REFRESHING) { - ret = 0; - mlog(0, "lockres %s currently being refreshed -- backing " - "off!\n", lockres->l_name); - } else if (new_level == LKM_PRMODE) - ret = !lockres->l_ex_holders && - ocfs2_inode_fully_checkpointed(inode); - else /* Must be NLMODE we're converting to. */ - ret = !lockres->l_ro_holders && !lockres->l_ex_holders && - ocfs2_inode_fully_checkpointed(inode); - - mlog_exit(ret); - return ret; -} - -static int ocfs2_do_unblock_meta(struct inode *inode, - int *requeue) -{ - int new_level; - int set_lvb = 0; - int ret = 0; - struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres; - unsigned long flags; - - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - - mlog_entry_void(); - - spin_lock_irqsave(&lockres->l_lock, flags); - - BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); - - mlog(0, "l_level=%d, l_blocking=%d\n", lockres->l_level, - lockres->l_blocking); - - BUG_ON(lockres->l_level != LKM_EXMODE && - lockres->l_level != LKM_PRMODE); - - if (lockres->l_flags & OCFS2_LOCK_BUSY) { - *requeue = 1; - ret = ocfs2_prepare_cancel_convert(osb, lockres); - spin_unlock_irqrestore(&lockres->l_lock, flags); - if (ret) { - ret = ocfs2_cancel_convert(osb, lockres); - if (ret < 0) - mlog_errno(ret); - } - goto leave; - } - - new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); - - mlog(0, "l_level=%d, l_blocking=%d, new_level=%d\n", - lockres->l_level, lockres->l_blocking, new_level); - - if (ocfs2_can_downconvert_meta_lock(inode, lockres, new_level)) { - if (lockres->l_level == LKM_EXMODE) - set_lvb = 1; - - /* If the lock hasn't been refreshed yet (rare), then - * our memory inode values are old and we skip - * stuffing the lvb. There's no need to actually clear - * out the lvb here as it's value is still valid. 
*/ - if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { - if (set_lvb) - __ocfs2_stuff_meta_lvb(inode); - } else - mlog(0, "lockres %s: downconverting stale lock!\n", - lockres->l_name); - - mlog(0, "calling ocfs2_downconvert_lock with l_level=%d, " - "l_blocking=%d, new_level=%d\n", - lockres->l_level, lockres->l_blocking, new_level); - - ocfs2_prepare_downconvert(lockres, new_level); - spin_unlock_irqrestore(&lockres->l_lock, flags); - ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb); - goto leave; - } - if (!ocfs2_inode_fully_checkpointed(inode)) - ocfs2_start_checkpoint(osb); - - *requeue = 1; - spin_unlock_irqrestore(&lockres->l_lock, flags); - ret = 0; -leave: - mlog_exit(ret); - return ret; -} - -static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, - int *requeue, - ocfs2_convert_worker_t *worker) +static int ocfs2_unblock_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + struct ocfs2_unblock_ctl *ctl) { unsigned long flags; int blocking; int new_level; int ret = 0; + int set_lvb = 0; mlog_entry_void(); @@ -2609,7 +2546,7 @@ static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb, recheck: if (lockres->l_flags & OCFS2_LOCK_BUSY) { - *requeue = 1; + ctl->requeue = 1; ret = ocfs2_prepare_cancel_convert(osb, lockres); spin_unlock_irqrestore(&lockres->l_lock, flags); if (ret) { @@ -2623,27 +2560,33 @@ recheck: /* if we're blocking an exclusive and we have *any* holders, * then requeue. */ if ((lockres->l_blocking == LKM_EXMODE) - && (lockres->l_ex_holders || lockres->l_ro_holders)) { - spin_unlock_irqrestore(&lockres->l_lock, flags); - *requeue = 1; - ret = 0; - goto leave; - } + && (lockres->l_ex_holders || lockres->l_ro_holders)) + goto leave_requeue; /* If it's a PR we're blocking, then only * requeue if we've got any EX holders */ if (lockres->l_blocking == LKM_PRMODE && - lockres->l_ex_holders) { - spin_unlock_irqrestore(&lockres->l_lock, flags); - *requeue = 1; - ret = 0; - goto leave; - } + lockres->l_ex_holders) + goto leave_requeue; + + /* + * Can we get a lock in this state if the holder counts are + * zero? The meta data unblock code used to check this. + */ + if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) + && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) + goto leave_requeue; + + new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); + + if (lockres->l_ops->check_downconvert + && !lockres->l_ops->check_downconvert(lockres, new_level)) + goto leave_requeue; /* If we get here, then we know that there are no more * incompatible holders (and anyone asking for an incompatible * lock is blocked). 
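 *
 * (Editor's summary, a condensed restatement of the requeue gates
 * ocfs2_unblock_lock() now applies above, not code from the patch:
 *
 *	if (lockres->l_flags & OCFS2_LOCK_BUSY)
 *		cancel the convert and requeue;
 *	if (blocking EX and there are any holders)
 *		requeue;
 *	if (blocking PR and there are EX holders)
 *		requeue;
 *	if (LOCK_TYPE_REQUIRES_REFRESH and OCFS2_LOCK_REFRESHING)
 *		requeue;
 *	if (l_ops->check_downconvert refuses the new level)
 *		requeue;
 *
 * Only when every gate passes does control reach the worker and the
 * downconvert below.)
 *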
We can now downconvert the lock */ - if (!worker) + if (!lockres->l_ops->downconvert_worker) goto downconvert; /* Some lockres types want to do a bit of work before @@ -2653,7 +2596,10 @@ recheck: blocking = lockres->l_blocking; spin_unlock_irqrestore(&lockres->l_lock, flags); - worker(lockres, blocking); + ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); + + if (ctl->unblock_action == UNBLOCK_STOP_POST) + goto leave; spin_lock_irqsave(&lockres->l_lock, flags); if (blocking != lockres->l_blocking) { @@ -2663,25 +2609,43 @@ recheck: } downconvert: - *requeue = 0; - new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); + ctl->requeue = 0; + + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) { + if (lockres->l_level == LKM_EXMODE) + set_lvb = 1; + + /* + * We only set the lvb if the lock has been fully + * refreshed - otherwise we risk setting stale + * data. Otherwise, there's no need to actually clear + * out the lvb here as its value is still valid. + */ + if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) + lockres->l_ops->set_lvb(lockres); + } ocfs2_prepare_downconvert(lockres, new_level); spin_unlock_irqrestore(&lockres->l_lock, flags); - ret = ocfs2_downconvert_lock(osb, lockres, new_level, 0); + ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb); leave: mlog_exit(ret); return ret; + +leave_requeue: + spin_unlock_irqrestore(&lockres->l_lock, flags); + ctl->requeue = 1; + + mlog_exit(0); + return 0; } -static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, - int blocking) +static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, + int blocking) { struct inode *inode; struct address_space *mapping; - mlog_entry_void(); - inode = ocfs2_lock_res_inode(lockres); mapping = inode->i_mapping; @@ -2702,116 +2666,159 @@ static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, filemap_fdatawait(mapping); } - mlog_exit_void(); + return UNBLOCK_CONTINUE; } -int ocfs2_unblock_data(struct ocfs2_lock_res *lockres, - int *requeue) +static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, + int new_level) { - int status; - struct inode *inode; - struct ocfs2_super *osb; - - mlog_entry_void(); - - inode = ocfs2_lock_res_inode(lockres); - osb = OCFS2_SB(inode->i_sb); - - mlog(0, "unblock inode %llu\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno); + struct inode *inode = ocfs2_lock_res_inode(lockres); + int checkpointed = ocfs2_inode_fully_checkpointed(inode); - status = ocfs2_generic_unblock_lock(osb, - lockres, - requeue, - ocfs2_data_convert_worker); - if (status < 0) - mlog_errno(status); + BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE); + BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed); - mlog(0, "inode %llu, requeue = %d\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, *requeue); + if (checkpointed) + return 1; - mlog_exit(status); - return status; + ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb)); + return 0; } -static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres, - int *requeue) +static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres) { - int status; - struct inode *inode; - - mlog_entry_void(); - - mlog(0, "Unblock lockres %s\n", lockres->l_name); - - inode = ocfs2_lock_res_inode(lockres); + struct inode *inode = ocfs2_lock_res_inode(lockres); - status = ocfs2_generic_unblock_lock(OCFS2_SB(inode->i_sb), - lockres, - requeue, - NULL); - if (status < 0) - mlog_errno(status); - - mlog_exit(status); - return status; + 
__ocfs2_stuff_meta_lvb(inode); } - -int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres, - int *requeue) +/* + * Does the final reference drop on our dentry lock. Right now this + * happens in the vote thread, but we could choose to simplify the + * dlmglue API and push these off to the ocfs2_wq in the future. + */ +static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) { - int status; - struct inode *inode; - - mlog_entry_void(); + struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres); + ocfs2_dentry_lock_put(osb, dl); +} - inode = ocfs2_lock_res_inode(lockres); +/* + * d_delete() matching dentries before the lock downconvert. + * + * At this point, any process waiting to destroy the + * dentry_lock due to last ref count is stopped by the + * OCFS2_LOCK_QUEUED flag. + * + * We have two potential problems: + * + * 1) If we do the last reference drop on our dentry_lock (via dput) + * we'll wind up in ocfs2_release_dentry_lock(), waiting on + * the downconvert to finish. Instead we take an elevated + * reference and push the drop until after we've completed our + * unblock processing. + * + * 2) There might be another process with a final reference, + * waiting on us to finish processing. If this is the case, we + * detect it and exit out - there are no more dentries anyway. + */ +static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, + int blocking) +{ + struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres); + struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode); + struct dentry *dentry; + unsigned long flags; + int extra_ref = 0; - mlog(0, "unblock inode %llu\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno); + /* + * This node is blocking another node from getting a read + * lock. This happens when we've renamed within a + * directory. We've forced the other nodes to d_delete(), but + * we never actually dropped our lock because it's still + * valid. The downconvert code will retain a PR for this node, + * so there's no further work to do. + */ + if (blocking == LKM_PRMODE) + return UNBLOCK_CONTINUE; - status = ocfs2_do_unblock_meta(inode, requeue); - if (status < 0) - mlog_errno(status); + /* + * Mark this inode as potentially orphaned. The code in + * ocfs2_delete_inode() will figure out whether it actually + * needs to be freed or not. + */ + spin_lock(&oi->ip_lock); + oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; + spin_unlock(&oi->ip_lock); - mlog(0, "inode %llu, requeue = %d\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, *requeue); + /* + * Yuck. We need to make sure however that the check of + * OCFS2_LOCK_FREEING and the extra reference are atomic with + * respect to a reference decrement or the setting of that + * flag. + */ + spin_lock_irqsave(&lockres->l_lock, flags); + spin_lock(&dentry_attach_lock); + if (!(lockres->l_flags & OCFS2_LOCK_FREEING) + && dl->dl_count) { + dl->dl_count++; + extra_ref = 1; + } + spin_unlock(&dentry_attach_lock); + spin_unlock_irqrestore(&lockres->l_lock, flags); - mlog_exit(status); - return status; -} + mlog(0, "extra_ref = %d\n", extra_ref); -/* Generic unblock function for any lockres whose private data is an - * ocfs2_super pointer. */ -static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres, - int *requeue) -{ - int status; - struct ocfs2_super *osb; + /* + * We have a process waiting on us in ocfs2_dentry_iput(), + * which means we can't have any more outstanding + * aliases. There's no need to do any more work. 
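 *
 * (Editor's aside: the locked section above is the classic "take a
 * reference only if the object isn't already dying" idiom. Stripped
 * of the ocfs2 specifics it is just:
 *
 *	spin_lock(&lock);
 *	if (!(obj->flags & DYING) && obj->count)
 *		obj->count++;
 *	spin_unlock(&lock);
 *
 * Testing dl_count without dentry_attach_lock, or the FREEING flag
 * without l_lock, would race against the final decrement.)
 *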
+ */ + if (!extra_ref) + return UNBLOCK_CONTINUE; + + spin_lock(&dentry_attach_lock); + while (1) { + dentry = ocfs2_find_local_alias(dl->dl_inode, + dl->dl_parent_blkno, 1); + if (!dentry) + break; + spin_unlock(&dentry_attach_lock); - mlog_entry_void(); + mlog(0, "d_delete(%.*s);\n", dentry->d_name.len, + dentry->d_name.name); - mlog(0, "Unblock lockres %s\n", lockres->l_name); + /* + * The following dcache calls may do an + * iput(). Normally we don't want that from the + * downconverting thread, but in this case it's ok + * because the requesting node already has an + * exclusive lock on the inode, so it can't be queued + * for a downconvert. + */ + d_delete(dentry); + dput(dentry); - osb = ocfs2_lock_res_super(lockres); + spin_lock(&dentry_attach_lock); + } + spin_unlock(&dentry_attach_lock); - status = ocfs2_generic_unblock_lock(osb, - lockres, - requeue, - NULL); - if (status < 0) - mlog_errno(status); + /* + * If we are the last holder of this dentry lock, there is no + * reason to downconvert so skip straight to the unlock. + */ + if (dl->dl_count == 1) + return UNBLOCK_STOP_POST; - mlog_exit(status); - return status; + return UNBLOCK_CONTINUE_POST; } void ocfs2_process_blocked_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres) { int status; - int requeue = 0; + struct ocfs2_unblock_ctl ctl = {0, 0,}; unsigned long flags; /* Our reference to the lockres in this function can be @@ -2822,7 +2829,6 @@ void ocfs2_process_blocked_lock(struct ocfs2_super *osb, BUG_ON(!lockres); BUG_ON(!lockres->l_ops); - BUG_ON(!lockres->l_ops->unblock); mlog(0, "lockres %s blocked.\n", lockres->l_name); @@ -2836,21 +2842,25 @@ void ocfs2_process_blocked_lock(struct ocfs2_super *osb, goto unqueue; spin_unlock_irqrestore(&lockres->l_lock, flags); - status = lockres->l_ops->unblock(lockres, &requeue); + status = ocfs2_unblock_lock(osb, lockres, &ctl); if (status < 0) mlog_errno(status); spin_lock_irqsave(&lockres->l_lock, flags); unqueue: - if (lockres->l_flags & OCFS2_LOCK_FREEING || !requeue) { + if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) { lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED); } else ocfs2_schedule_blocked_lock(osb, lockres); mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name, - requeue ? "yes" : "no"); + ctl.requeue ? 
"yes" : "no"); spin_unlock_irqrestore(&lockres->l_lock, flags); + if (ctl.unblock_action != UNBLOCK_CONTINUE + && lockres->l_ops->post_unlock) + lockres->l_ops->post_unlock(osb, lockres); + mlog_exit_void(); } @@ -2893,15 +2903,17 @@ void ocfs2_dump_meta_lvb_info(u64 level, mlog(level, "LVB information for %s (called from %s:%u):\n", lockres->l_name, function, line); - mlog(level, "version: %u, clusters: %u\n", - be32_to_cpu(lvb->lvb_version), be32_to_cpu(lvb->lvb_iclusters)); + mlog(level, "version: %u, clusters: %u, generation: 0x%x\n", + lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters), + be32_to_cpu(lvb->lvb_igeneration)); mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n", (unsigned long long)be64_to_cpu(lvb->lvb_isize), be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid), be16_to_cpu(lvb->lvb_imode)); mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, " - "mtime_packed 0x%llx\n", be16_to_cpu(lvb->lvb_inlink), + "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink), (long long)be64_to_cpu(lvb->lvb_iatime_packed), (long long)be64_to_cpu(lvb->lvb_ictime_packed), - (long long)be64_to_cpu(lvb->lvb_imtime_packed)); + (long long)be64_to_cpu(lvb->lvb_imtime_packed), + be32_to_cpu(lvb->lvb_iattr)); } diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 8f2d1db2d9e..4a276938722 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -27,10 +27,14 @@ #ifndef DLMGLUE_H #define DLMGLUE_H -#define OCFS2_LVB_VERSION 2 +#include "dcache.h" + +#define OCFS2_LVB_VERSION 4 struct ocfs2_meta_lvb { - __be32 lvb_version; + __u8 lvb_version; + __u8 lvb_reserved0; + __be16 lvb_reserved1; __be32 lvb_iclusters; __be32 lvb_iuid; __be32 lvb_igid; @@ -40,7 +44,9 @@ struct ocfs2_meta_lvb { __be64 lvb_isize; __be16 lvb_imode; __be16 lvb_inlink; - __be32 lvb_reserved[3]; + __be32 lvb_iattr; + __be32 lvb_igeneration; + __be32 lvb_reserved2; }; /* ocfs2_meta_lock_full() and ocfs2_data_lock_full() 'arg_flags' flags */ @@ -56,9 +62,14 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb); void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res); void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, enum ocfs2_lock_type type, + unsigned int generation, struct inode *inode); +void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, + u64 parent, struct inode *inode); void ocfs2_lock_res_free(struct ocfs2_lock_res *res); int ocfs2_create_new_inode_locks(struct inode *inode); +int ocfs2_create_new_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, int ex, int local); int ocfs2_drop_inode_locks(struct inode *inode); int ocfs2_data_lock_full(struct inode *inode, int write, @@ -92,7 +103,12 @@ void ocfs2_super_unlock(struct ocfs2_super *osb, int ex); int ocfs2_rename_lock(struct ocfs2_super *osb); void ocfs2_rename_unlock(struct ocfs2_super *osb); +int ocfs2_dentry_lock(struct dentry *dentry, int ex); +void ocfs2_dentry_unlock(struct dentry *dentry, int ex); + void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres); +void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); /* for the vote thread */ void ocfs2_process_blocked_lock(struct ocfs2_super *osb, diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index ec55ab3c121..fb91089a60a 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -33,6 +33,7 @@ #include "dir.h" #include "dlmglue.h" +#include "dcache.h" #include "export.h" #include "inode.h" @@ -57,7 +58,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, void *vobjp) return ERR_PTR(-ESTALE); } 
- inode = ocfs2_iget(OCFS2_SB(sb), handle->ih_blkno); + inode = ocfs2_iget(OCFS2_SB(sb), handle->ih_blkno, 0); if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); @@ -77,6 +78,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, void *vobjp) mlog_errno(-ENOMEM); return ERR_PTR(-ENOMEM); } + result->d_op = &ocfs2_dentry_ops; mlog_exit_ptr(result); return result; @@ -113,7 +115,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) goto bail_unlock; } - inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno); + inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0); if (IS_ERR(inode)) { mlog(ML_ERROR, "Unable to create inode %llu\n", (unsigned long long)blkno); @@ -127,6 +129,8 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) parent = ERR_PTR(-ENOMEM); } + parent->d_op = &ocfs2_dentry_ops; + bail_unlock: ocfs2_meta_unlock(dir, 0); diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 1a5c69071df..fcd4475d1f8 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -298,7 +298,7 @@ static int ocfs2_extent_map_find_leaf(struct inode *inode, ret = ocfs2_extent_map_insert(inode, rec, le16_to_cpu(el->l_tree_depth)); - if (ret) { + if (ret && (ret != -EEXIST)) { mlog_errno(ret); goto out_free; } @@ -427,6 +427,11 @@ static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em, /* * Simple rule: on any return code other than -EAGAIN, anything left * in the insert_context will be freed. + * + * Simple rule #2: A return code of -EEXIST from this function or + * its calls to ocfs2_extent_map_insert_entry() signifies that another + * thread beat us to the insert. It is not an actual error, but it + * tells the caller we have no more work to do. */ static int ocfs2_extent_map_try_insert(struct inode *inode, struct ocfs2_extent_rec *rec, @@ -448,22 +453,32 @@ static int ocfs2_extent_map_try_insert(struct inode *inode, goto out_unlock; } + /* Since insert_entry failed, the map MUST have old_ent */ old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), - le32_to_cpu(rec->e_clusters), NULL, - NULL); + le32_to_cpu(rec->e_clusters), + NULL, NULL); BUG_ON(!old_ent); - ret = -EEXIST; - if (old_ent->e_tree_depth < tree_depth) + if (old_ent->e_tree_depth < tree_depth) { + /* Another thread beat us to the lower tree_depth */ + ret = -EEXIST; goto out_unlock; + } if (old_ent->e_tree_depth == tree_depth) { + /* + * Another thread beat us to this tree_depth. + * Let's make sure we agree with that thread (the + * extent_rec should be identical). + */ if (!memcmp(rec, &old_ent->e_rec, sizeof(struct ocfs2_extent_rec))) ret = 0; + else + /* FIXME: Should this be ESRCH/EBADR??? */ + ret = -EEXIST; - /* FIXME: Should this be ESRCH/EBADR??? 
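 *
 * (Editor's aside: whatever errno is eventually chosen, the callers
 * in this diff already treat -EEXIST as "another thread won the
 * race, nothing left to do" rather than a failure:
 *
 *	if (ret && ret != -EEXIST)
 *		mlog_errno(ret);
 *
 * as ocfs2_extent_map_find_leaf() and ocfs2_extent_map_insert() now
 * do.)
 *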
*/ goto out_unlock; } @@ -599,7 +614,7 @@ static int ocfs2_extent_map_insert(struct inode *inode, tree_depth, &ctxt); } while (ret == -EAGAIN); - if (ret < 0) + if ((ret < 0) && (ret != -EEXIST)) mlog_errno(ret); if (ctxt.left_ent) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index a9559c87453..2bbfa17090c 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -44,6 +44,7 @@ #include "file.h" #include "sysfile.h" #include "inode.h" +#include "ioctl.h" #include "journal.h" #include "mmap.h" #include "suballoc.h" @@ -1227,10 +1228,12 @@ const struct file_operations ocfs2_fops = { .open = ocfs2_file_open, .aio_read = ocfs2_file_aio_read, .aio_write = ocfs2_file_aio_write, + .ioctl = ocfs2_ioctl, }; const struct file_operations ocfs2_dops = { .read = generic_read_dir, .readdir = ocfs2_readdir, .fsync = ocfs2_sync_file, + .ioctl = ocfs2_ioctl, }; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 327a5b7b86e..16e8e74dc96 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -54,8 +54,6 @@ #include "buffer_head_io.h" -#define OCFS2_FI_FLAG_NOWAIT 0x1 -#define OCFS2_FI_FLAG_DELETE 0x2 struct ocfs2_find_inode_args { u64 fi_blkno; @@ -71,6 +69,26 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh); +void ocfs2_set_inode_flags(struct inode *inode) +{ + unsigned int flags = OCFS2_I(inode)->ip_attr; + + inode->i_flags &= ~(S_IMMUTABLE | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & OCFS2_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + + if (flags & OCFS2_SYNC_FL) + inode->i_flags |= S_SYNC; + if (flags & OCFS2_APPEND_FL) + inode->i_flags |= S_APPEND; + if (flags & OCFS2_NOATIME_FL) + inode->i_flags |= S_NOATIME; + if (flags & OCFS2_DIRSYNC_FL) + inode->i_flags |= S_DIRSYNC; +} + struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb, u64 blkno, int delete_vote) @@ -89,7 +107,7 @@ struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb, return ilookup5(osb->sb, args.fi_ino, ocfs2_find_actor, &args); } -struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno) +struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, int flags) { struct inode *inode = NULL; struct super_block *sb = osb->sb; @@ -107,7 +125,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno) } args.fi_blkno = blkno; - args.fi_flags = 0; + args.fi_flags = flags; args.fi_ino = ino_from_blkno(sb, blkno); inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor, @@ -251,7 +269,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, inode->i_mode = le16_to_cpu(fe->i_mode); inode->i_uid = le32_to_cpu(fe->i_uid); inode->i_gid = le32_to_cpu(fe->i_gid); - inode->i_blksize = (u32)osb->s_clustersize; /* Fast symlinks will have i_size but no allocated clusters. 
*/ if (S_ISLNK(inode->i_mode) && !fe->i_clusters) @@ -260,7 +277,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, inode->i_blocks = ocfs2_align_bytes_to_sectors(le64_to_cpu(fe->i_size)); inode->i_mapping->a_ops = &ocfs2_aops; - inode->i_flags |= S_NOATIME; inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); @@ -276,16 +292,13 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); OCFS2_I(inode)->ip_orphaned_slot = OCFS2_INVALID_SLOT; - - if (create_ino) - inode->i_ino = ino_from_blkno(inode->i_sb, - le64_to_cpu(fe->i_blkno)); - - mlog(0, "blkno = %llu, ino = %lu, create_ino = %s\n", - (unsigned long long)fe->i_blkno, inode->i_ino, create_ino ? "true" : "false"); + OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr); inode->i_nlink = le16_to_cpu(fe->i_links_count); + if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) + OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; + if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino); @@ -323,12 +336,31 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, break; } + if (create_ino) { + inode->i_ino = ino_from_blkno(inode->i_sb, + le64_to_cpu(fe->i_blkno)); + + /* + * If we ever want to create system files from kernel, + * the generation argument to + * ocfs2_inode_lock_res_init() will have to change. + */ + BUG_ON(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)); + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres, + OCFS2_LOCK_TYPE_META, 0, inode); + } + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres, - OCFS2_LOCK_TYPE_RW, inode); - ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres, - OCFS2_LOCK_TYPE_META, inode); + OCFS2_LOCK_TYPE_RW, inode->i_generation, + inode); + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_data_lockres, - OCFS2_LOCK_TYPE_DATA, inode); + OCFS2_LOCK_TYPE_DATA, inode->i_generation, + inode); + + ocfs2_set_inode_flags(inode); + inode->i_flags |= S_NOATIME; status = 0; bail: @@ -343,15 +375,15 @@ static int ocfs2_read_locked_inode(struct inode *inode, struct ocfs2_super *osb; struct ocfs2_dinode *fe; struct buffer_head *bh = NULL; - int status; - int sysfile = 0; + int status, can_lock; + u32 generation = 0; mlog_entry("(0x%p, 0x%p)\n", inode, args); status = -EINVAL; if (inode == NULL || inode->i_sb == NULL) { mlog(ML_ERROR, "bad inode\n"); - goto bail; + return status; } sb = inode->i_sb; osb = OCFS2_SB(sb); @@ -359,50 +391,110 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (!args) { mlog(ML_ERROR, "bad inode args\n"); make_bad_inode(inode); - goto bail; + return status; + } + + /* + * To improve performance of cold-cache inode stats, we take + * the cluster lock here if possible. + * + * Generally, OCFS2 never trusts the contents of an inode + * unless it's holding a cluster lock, so taking it here isn't + * a correctness issue as much as it is a performance + * improvement. + * + * There are three times when taking the lock is not a good idea: + * + * 1) During startup, before we have initialized the DLM. + * + * 2) If we are reading certain system files which never get + * cluster locks (local alloc, truncate log). + * + * 3) If the process doing the iget() is responsible for + * orphan dir recovery. 
We're holding the orphan dir lock and + * can get into a deadlock with another process on another + * node in ->delete_inode(). + * + * #1 and #2 can be simply solved by never taking the lock + * here for system files (which are the only type we read + * during mount). It's a heavier approach, but our main + * concern is user-accessible files anyway. + * + * #3 works itself out because we'll eventually take the + * cluster lock before trusting anything anyway. + */ + can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE) + && !(args->fi_flags & OCFS2_FI_FLAG_NOLOCK); + + /* + * To maintain backwards compatibility with older versions of + * ocfs2-tools, we still store the generation value for system + * files. The only ones that actually matter to userspace are + * the journals, but it's easier and inexpensive to just flag + * all system files similarly. + */ + if (args->fi_flags & OCFS2_FI_FLAG_SYSFILE) + generation = osb->fs_generation; + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres, + OCFS2_LOCK_TYPE_META, + generation, inode); + + if (can_lock) { + status = ocfs2_meta_lock(inode, NULL, NULL, 0); + if (status) { + make_bad_inode(inode); + mlog_errno(status); + return status; + } } - /* Read the FE off disk. This is safe because the kernel only - * does one read_inode2 for a new inode, and if it doesn't - * exist yet then nobody can be working on it! */ - status = ocfs2_read_block(osb, args->fi_blkno, &bh, 0, NULL); + status = ocfs2_read_block(osb, args->fi_blkno, &bh, 0, + can_lock ? inode : NULL); if (status < 0) { mlog_errno(status); - make_bad_inode(inode); goto bail; } + status = -EINVAL; fe = (struct ocfs2_dinode *) bh->b_data; if (!OCFS2_IS_VALID_DINODE(fe)) { mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n", (unsigned long long)fe->i_blkno, 7, fe->i_signature); - make_bad_inode(inode); goto bail; } - if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) - sysfile = 1; + /* + * This is a code bug. Right now the caller needs to + * understand whether it is asking for a system file inode or + * not so the proper lock names can be built. + */ + mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) != + !!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE), + "Inode %llu: system file state is ambiguous\n", + (unsigned long long)args->fi_blkno); if (S_ISCHR(le16_to_cpu(fe->i_mode)) || S_ISBLK(le16_to_cpu(fe->i_mode))) inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); - status = -EINVAL; if (ocfs2_populate_inode(inode, fe, 0) < 0) { mlog(ML_ERROR, "populate failed! i_blkno=%llu, i_ino=%lu\n", (unsigned long long)fe->i_blkno, inode->i_ino); - make_bad_inode(inode); goto bail; } BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno)); - if (sysfile) - OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; - status = 0; bail: + if (can_lock) + ocfs2_meta_unlock(inode, 0); + + if (status < 0) + make_bad_inode(inode); + if (args && bh) brelse(bh); @@ -875,9 +967,15 @@ void ocfs2_delete_inode(struct inode *inode) goto bail_unlock_inode; } - /* Mark the inode as successfully deleted. This is important - * for ocfs2_clear_inode as it will check this flag and skip - * any checkpointing work */ + /* + * Mark the inode as successfully deleted. + * + * This is important for ocfs2_clear_inode() as it will check + * this flag and skip any checkpointing work + * + * ocfs2_stuff_meta_lvb() also uses this flag to invalidate + * the LVB for other nodes. 
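 *
 * (Editor's cross-reference: "invalidate" is literal here. In the
 * dlmglue.c hunk earlier in this diff, __ocfs2_stuff_meta_lvb()
 * does:
 *
 *	if (oi->ip_flags & OCFS2_INODE_DELETED) {
 *		lvb->lvb_version = 0;
 *		goto out;
 *	}
 *
 * A zero version can never equal OCFS2_LVB_VERSION, so
 * ocfs2_meta_lvb_is_trustable() on other nodes rejects the LVB and
 * forces a refresh from disk.)
 *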
+ */ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED; bail_unlock_inode: @@ -1002,12 +1100,10 @@ void ocfs2_drop_inode(struct inode *inode) /* Testing ip_orphaned_slot here wouldn't work because we may * not have gotten a delete_inode vote from any other nodes * yet. */ - if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) { - mlog(0, "Inode was orphaned on another node, clearing nlink.\n"); - inode->i_nlink = 0; - } - - generic_drop_inode(inode); + if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) + generic_delete_inode(inode); + else + generic_drop_inode(inode); mlog_exit_void(); } @@ -1027,12 +1123,8 @@ struct buffer_head *ocfs2_bread(struct inode *inode, u64 p_blkno; int readflags = OCFS2_BH_CACHED; -#if 0 - /* only turn this on if we know we can deal with read_block - * returning nothing */ if (reada) readflags |= OCFS2_BH_READAHEAD; -#endif if (((u64)block << inode->i_sb->s_blocksize_bits) >= i_size_read(inode)) { @@ -1131,6 +1223,7 @@ int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle, spin_lock(&OCFS2_I(inode)->ip_lock); fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters); + fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr); spin_unlock(&OCFS2_I(inode)->ip_lock); fe->i_size = cpu_to_le64(i_size_read(inode)); @@ -1164,17 +1257,16 @@ leave: void ocfs2_refresh_inode(struct inode *inode, struct ocfs2_dinode *fe) { - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - spin_lock(&OCFS2_I(inode)->ip_lock); OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr); + ocfs2_set_inode_flags(inode); i_size_write(inode, le64_to_cpu(fe->i_size)); inode->i_nlink = le16_to_cpu(fe->i_links_count); inode->i_uid = le32_to_cpu(fe->i_uid); inode->i_gid = le32_to_cpu(fe->i_gid); inode->i_mode = le16_to_cpu(fe->i_mode); - inode->i_blksize = (u32) osb->s_clustersize; if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0) inode->i_blocks = 0; else diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 84c50796128..9957810fdf8 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -56,6 +56,7 @@ struct ocfs2_inode_info struct ocfs2_journal_handle *ip_handle; u32 ip_flags; /* see below */ + u32 ip_attr; /* inode attributes */ /* protected by recovery_lock. 
*/ struct inode *ip_next_orphan; @@ -114,14 +115,20 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) extern kmem_cache_t *ocfs2_inode_cache; -extern struct address_space_operations ocfs2_aops; +extern const struct address_space_operations ocfs2_aops; struct buffer_head *ocfs2_bread(struct inode *inode, int block, int *err, int reada); void ocfs2_clear_inode(struct inode *inode); void ocfs2_delete_inode(struct inode *inode); void ocfs2_drop_inode(struct inode *inode); -struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff); + +/* Flags for ocfs2_iget() */ +#define OCFS2_FI_FLAG_NOWAIT 0x1 +#define OCFS2_FI_FLAG_DELETE 0x2 +#define OCFS2_FI_FLAG_SYSFILE 0x4 +#define OCFS2_FI_FLAG_NOLOCK 0x8 +struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, int flags); struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb, u64 blkno, int delete_vote); @@ -142,4 +149,6 @@ int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle, int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb); int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb); +void ocfs2_set_inode_flags(struct inode *inode); + #endif /* OCFS2_INODE_H */ diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c new file mode 100644 index 00000000000..3663cef8068 --- /dev/null +++ b/fs/ocfs2/ioctl.c @@ -0,0 +1,136 @@ +/* + * linux/fs/ocfs2/ioctl.c + * + * Copyright (C) 2006 Herbert Poetzl + * adapted from Remy Card's ext2/ioctl.c + */ + +#include <linux/fs.h> +#include <linux/mount.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" +#include "alloc.h" +#include "dlmglue.h" +#include "inode.h" +#include "journal.h" + +#include "ocfs2_fs.h" +#include "ioctl.h" + +#include <linux/ext2_fs.h> + +static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) +{ + int status; + + status = ocfs2_meta_lock(inode, NULL, NULL, 0); + if (status < 0) { + mlog_errno(status); + return status; + } + *flags = OCFS2_I(inode)->ip_attr; + ocfs2_meta_unlock(inode, 0); + + mlog_exit(status); + return status; +} + +static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, + unsigned mask) +{ + struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_journal_handle *handle = NULL; + struct buffer_head *bh = NULL; + unsigned oldflags; + int status; + + mutex_lock(&inode->i_mutex); + + status = ocfs2_meta_lock(inode, NULL, &bh, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = -EROFS; + if (IS_RDONLY(inode)) + goto bail_unlock; + + status = -EACCES; + if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) + goto bail_unlock; + + if (!S_ISDIR(inode->i_mode)) + flags &= ~OCFS2_DIRSYNC_FL; + + handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + oldflags = ocfs2_inode->ip_attr; + flags = flags & mask; + flags |= oldflags & ~mask; + + /* + * The IMMUTABLE and APPEND_ONLY flags can only be changed by + * the relevant capability. 
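 *
 * (Editor's aside: the mask arithmetic a few lines up, written out
 * as two statements in the patch, is equivalent to
 *
 *	flags = (flags & mask) | (oldflags & ~mask);
 *
 * so bits outside the mask are always carried over from the old
 * attributes. Since OCFS2_FL_MODIFIABLE is 0x000100FF, userspace can
 * only flip the EXT2-compatible low bits plus DIRSYNC, no matter
 * what it passes to OCFS2_IOC_SETFLAGS.)
 *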
+ */ + status = -EPERM; + if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & + (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { + if (!capable(CAP_LINUX_IMMUTABLE)) + goto bail_unlock; + } + + ocfs2_inode->ip_attr = flags; + ocfs2_set_inode_flags(inode); + + status = ocfs2_mark_inode_dirty(handle, inode, bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(handle); +bail_unlock: + ocfs2_meta_unlock(inode, 1); +bail: + mutex_unlock(&inode->i_mutex); + + if (bh) + brelse(bh); + + mlog_exit(status); + return status; +} + +int ocfs2_ioctl(struct inode * inode, struct file * filp, + unsigned int cmd, unsigned long arg) +{ + unsigned int flags; + int status; + + switch (cmd) { + case OCFS2_IOC_GETFLAGS: + status = ocfs2_get_inode_attr(inode, &flags); + if (status < 0) + return status; + + flags &= OCFS2_FL_VISIBLE; + return put_user(flags, (int __user *) arg); + case OCFS2_IOC_SETFLAGS: + if (get_user(flags, (int __user *) arg)) + return -EFAULT; + + return ocfs2_set_inode_attr(inode, flags, + OCFS2_FL_MODIFIABLE); + default: + return -ENOTTY; + } +} + diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h new file mode 100644 index 00000000000..4a7c82931db --- /dev/null +++ b/fs/ocfs2/ioctl.h @@ -0,0 +1,16 @@ +/* + * ioctl.h + * + * Function prototypes + * + * Copyright (C) 2006 Herbert Poetzl + * + */ + +#ifndef OCFS2_IOCTL_H +#define OCFS2_IOCTL_H + +int ocfs2_ioctl(struct inode * inode, struct file * filp, + unsigned int cmd, unsigned long arg); + +#endif /* OCFS2_IOCTL_H */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index eebc3cfa6be..fd9734def55 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -49,7 +49,7 @@ #include "buffer_head_io.h" -spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED; +DEFINE_SPINLOCK(trans_inc_lock); static int ocfs2_force_read_journal(struct inode *inode); static int ocfs2_recover_node(struct ocfs2_super *osb, @@ -222,8 +222,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle, BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list)); OCFS2_I(inode)->ip_handle = handle; - list_del(&(OCFS2_I(inode)->ip_handle_list)); - list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list)); + list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list)); } static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle) @@ -785,8 +784,7 @@ int ocfs2_journal_load(struct ocfs2_journal *journal) } /* Launch the commit thread */ - osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt-%d", - osb->osb_id); + osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt"); if (IS_ERR(osb->commit_task)) { status = PTR_ERR(osb->commit_task); osb->commit_task = NULL; @@ -1119,7 +1117,7 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) goto out; osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, - "ocfs2rec-%d", osb->osb_id); + "ocfs2rec"); if (IS_ERR(osb->recovery_thread_task)) { mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); osb->recovery_thread_task = NULL; @@ -1495,7 +1493,8 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, if (de->name_len == 2 && !strncmp("..", de->name, 2)) continue; - iter = ocfs2_iget(osb, le64_to_cpu(de->inode)); + iter = ocfs2_iget(osb, le64_to_cpu(de->inode), + OCFS2_FI_FLAG_NOLOCK); if (IS_ERR(iter)) continue; diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 0d1973ea32b..1f17a4d0828 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -840,6 +840,12 @@ static int 
ocfs2_local_alloc_new_window(struct ocfs2_super *osb, mlog(0, "Allocating %u clusters for a new window.\n", ocfs2_local_alloc_window_bits(osb)); + + /* Instruct the allocation code to try the most recently used + * cluster group. We'll re-record the group used this pass + * below. */ + ac->ac_last_group = osb->la_last_gd; + /* we used the generic suballoc reserve function, but we set * everything up nicely, so there's no reason why we can't use * the more specific cluster api to claim bits. */ @@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, goto bail; } + osb->la_last_gd = ac->ac_last_group; + la->la_bm_off = cpu_to_le32(cluster_off); alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); /* just in case... In the future when we find space ourselves, diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 843cf9ddefe..83934e33e5b 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -46,12 +46,12 @@ static struct page *ocfs2_nopage(struct vm_area_struct * area, unsigned long address, int *type) { - struct inode *inode = area->vm_file->f_dentry->d_inode; struct page *page = NOPAGE_SIGBUS; sigset_t blocked, oldset; int ret; - mlog_entry("(inode %lu, address %lu)\n", inode->i_ino, address); + mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address, + type); /* The best way to deal with signals in this path is * to block them upfront, rather than allowing the diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 0673862c8bd..849c3b4bb94 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -56,6 +56,7 @@ #include "journal.h" #include "namei.h" #include "suballoc.h" +#include "super.h" #include "symlink.h" #include "sysfile.h" #include "uptodate.h" @@ -178,7 +179,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, if (status < 0) goto bail_add; - inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno); + inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0); if (IS_ERR(inode)) { mlog(ML_ERROR, "Unable to create inode %llu\n", (unsigned long long)blkno); @@ -198,10 +199,32 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, spin_unlock(&oi->ip_lock); bail_add: - dentry->d_op = &ocfs2_dentry_ops; ret = d_splice_alias(inode, dentry); + if (inode) { + /* + * If d_splice_alias() finds a DCACHE_DISCONNECTED + * dentry, it will d_move() it on top of ours. The + * return value will indicate this however, so in + * those cases, we switch them around for the locking + * code. 
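 *
 * (Editor's restatement of the switch described above — the lock
 * must be attached to whichever dentry actually ends up wired into
 * the dcache:
 *
 *	ret = d_splice_alias(inode, dentry);
 *	if (ret)
 *		dentry = ret;
 *	ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
 *
 * Attaching to the stale dentry instead would leave the live alias
 * without a cluster lock.)
 *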
+ * + * NOTE: This dentry already has ->d_op set from + * ocfs2_get_parent() and ocfs2_get_dentry() + */ + if (ret) + dentry = ret; + + status = ocfs2_dentry_attach_lock(dentry, inode, + OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + ret = ERR_PTR(status); + goto bail_unlock; + } + } + bail_unlock: /* Don't drop the cluster lock until *after* the d_add -- * unlink on another node will message us to remove that @@ -310,13 +333,6 @@ static int ocfs2_mknod(struct inode *dir, /* get our super block */ osb = OCFS2_SB(dir->i_sb); - if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) { - mlog(ML_ERROR, "inode %llu has i_nlink of %u\n", - (unsigned long long)OCFS2_I(dir)->ip_blkno, dir->i_nlink); - status = -EMLINK; - goto leave; - } - handle = ocfs2_alloc_handle(osb); if (handle == NULL) { status = -ENOMEM; @@ -331,6 +347,11 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) { + status = -EMLINK; + goto leave; + } + dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data; if (!dirfe->i_links_count) { /* can't make a file in a deleted directory. */ @@ -419,6 +440,13 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + status = ocfs2_dentry_attach_lock(dentry, inode, + OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + goto leave; + } + insert_inode_hash(inode); dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); @@ -643,11 +671,6 @@ static int ocfs2_link(struct dentry *old_dentry, goto bail; } - if (inode->i_nlink >= OCFS2_LINK_MAX) { - err = -EMLINK; - goto bail; - } - handle = ocfs2_alloc_handle(osb); if (handle == NULL) { err = -ENOMEM; @@ -661,6 +684,11 @@ static int ocfs2_link(struct dentry *old_dentry, goto bail; } + if (!dir->i_nlink) { + err = -ENOENT; + goto bail; + } + err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, dentry->d_name.len); if (err) @@ -726,6 +754,12 @@ static int ocfs2_link(struct dentry *old_dentry, goto bail; } + err = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno); + if (err) { + mlog_errno(err); + goto bail; + } + atomic_inc(&inode->i_count); dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); @@ -744,6 +778,23 @@ bail: return err; } +/* + * Takes and drops an exclusive lock on the given dentry. This will + * force other nodes to drop it. + */ +static int ocfs2_remote_dentry_delete(struct dentry *dentry) +{ + int ret; + + ret = ocfs2_dentry_lock(dentry, 1); + if (ret) + mlog_errno(ret); + else + ocfs2_dentry_unlock(dentry, 1); + + return ret; +} + static int ocfs2_unlink(struct inode *dir, struct dentry *dentry) { @@ -833,8 +884,7 @@ static int ocfs2_unlink(struct inode *dir, else inode->i_nlink--; - status = ocfs2_request_unlink_vote(inode, dentry, - (unsigned int) inode->i_nlink); + status = ocfs2_remote_dentry_delete(dentry); if (status < 0) { /* This vote should succeed under all normal * circumstances. */ @@ -1020,7 +1070,6 @@ static int ocfs2_rename(struct inode *old_dir, struct buffer_head *old_inode_de_bh = NULL; // if old_dentry is a dir, // this is the 1st dirent bh nlink_t old_dir_nlink = old_dir->i_nlink, new_dir_nlink = new_dir->i_nlink; - unsigned int links_count; /* At some point it might be nice to break this function up a * bit. */ @@ -1094,23 +1143,26 @@ static int ocfs2_rename(struct inode *old_dir, } } - if (S_ISDIR(old_inode->i_mode)) { - /* Directories actually require metadata updates to - * the directory info so we can't get away with not - * doing node locking on it. 
*/ - status = ocfs2_meta_lock(old_inode, handle, NULL, 1); - if (status < 0) { - if (status != -ENOENT) - mlog_errno(status); - goto bail; - } - - status = ocfs2_request_rename_vote(old_inode, old_dentry); - if (status < 0) { + /* + * Though we don't require an inode meta data update if + * old_inode is not a directory, we lock anyway here to ensure + * the vote thread on other nodes won't have to concurrently + * downconvert the inode and the dentry locks. + */ + status = ocfs2_meta_lock(old_inode, handle, NULL, 1); + if (status < 0) { + if (status != -ENOENT) mlog_errno(status); - goto bail; - } + goto bail; + } + + status = ocfs2_remote_dentry_delete(old_dentry); + if (status < 0) { + mlog_errno(status); + goto bail; + } + if (S_ISDIR(old_inode->i_mode)) { status = -EIO; old_inode_de_bh = ocfs2_bread(old_inode, 0, &status, 0); if (!old_inode_de_bh) @@ -1124,14 +1176,6 @@ static int ocfs2_rename(struct inode *old_dir, if (!new_inode && new_dir!=old_dir && new_dir->i_nlink >= OCFS2_LINK_MAX) goto bail; - } else { - /* Ah, the simple case - we're a file so just send a - * message. */ - status = ocfs2_request_rename_vote(old_inode, old_dentry); - if (status < 0) { - mlog_errno(status); - goto bail; - } } status = -ENOENT; @@ -1203,13 +1247,7 @@ static int ocfs2_rename(struct inode *old_dir, goto bail; } - if (S_ISDIR(new_inode->i_mode)) - links_count = 0; - else - links_count = (unsigned int) (new_inode->i_nlink - 1); - - status = ocfs2_request_unlink_vote(new_inode, new_dentry, - links_count); + status = ocfs2_remote_dentry_delete(new_dentry); if (status < 0) { mlog_errno(status); goto bail; @@ -1388,6 +1426,7 @@ static int ocfs2_rename(struct inode *old_dir, } } + ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir); status = 0; bail: if (rename_lock) @@ -1676,6 +1715,12 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } + status = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + goto bail; + } + insert_inode_hash(inode); dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); @@ -1964,13 +2009,8 @@ restart: } num++; - /* XXX: questionable readahead stuff here */ bh = ocfs2_bread(dir, b++, &err, 1); bh_use[ra_max] = bh; -#if 0 // ??? 
- if (bh) - ll_rw_block(READ, 1, &bh); -#endif } } if ((bh = bh_use[ra_ptr++]) == NULL) @@ -1978,6 +2018,10 @@ restart: wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ + ocfs2_error(dir->i_sb, "reading directory %llu, " + "offset %lu\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno, + block); brelse(bh); goto next; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index da1093039c0..0462a7f4e21 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -184,7 +184,6 @@ struct ocfs2_journal; struct ocfs2_journal_handle; struct ocfs2_super { - u32 osb_id; /* id used by the proc interface */ struct task_struct *commit_task; struct super_block *sb; struct inode *root_inode; @@ -198,7 +197,6 @@ struct ocfs2_super struct ocfs2_node_map recovery_map; struct ocfs2_node_map umount_map; - u32 num_clusters; u64 root_blkno; u64 system_dir_blkno; u64 bitmap_blkno; @@ -222,13 +220,11 @@ struct ocfs2_super unsigned long s_mount_opt; u16 max_slots; - u16 num_nodes; s16 node_num; s16 slot_num; int s_sectsize_bits; int s_clustersize; int s_clustersize_bits; - struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */ atomic_t vol_state; struct mutex recovery_lock; @@ -240,6 +236,7 @@ struct ocfs2_super enum ocfs2_local_alloc_state local_alloc_state; struct buffer_head *local_alloc_bh; + u64 la_last_gd; /* Next two fields are for local node slot recovery during * mount. */ @@ -294,7 +291,6 @@ struct ocfs2_super }; #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) -#define OCFS2_MAX_OSB_ID 65536 static inline int ocfs2_should_order_data(struct inode *inode) { diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index c5b1ac547c1..3330a5dc6be 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -114,6 +114,26 @@ #define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */ #define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */ +/* Inode attributes, keep in sync with EXT2 */ +#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ +#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ +#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ +#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ +#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ +#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ +#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ +#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ +#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ + +#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ +#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ + +/* + * ioctl commands + */ +#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) +#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) + /* * Journal Flags (ocfs2_dinode.id1.journal1.i_flags) */ @@ -399,7 +419,9 @@ struct ocfs2_dinode { __le32 i_atime_nsec; __le32 i_ctime_nsec; __le32 i_mtime_nsec; -/*70*/ __le64 i_reserved1[9]; + __le32 i_attr; + __le32 i_reserved1; +/*70*/ __le64 i_reserved2[8]; /*B8*/ union { __le64 i_pad1; /* Generic way to refer to this 64bit union */ diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index 7dd9e1e705b..4d5d5655c18 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -35,12 +35,15 @@ #define OCFS2_LOCK_ID_MAX_LEN 32 #define OCFS2_LOCK_ID_PAD "000000" +#define OCFS2_DENTRY_LOCK_INO_START 18 + enum ocfs2_lock_type { OCFS2_LOCK_TYPE_META = 0, OCFS2_LOCK_TYPE_DATA, OCFS2_LOCK_TYPE_SUPER, 
OCFS2_LOCK_TYPE_RENAME, OCFS2_LOCK_TYPE_RW, + OCFS2_LOCK_TYPE_DENTRY, OCFS2_NUM_LOCK_TYPES }; @@ -63,6 +66,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type) case OCFS2_LOCK_TYPE_RW: c = 'W'; break; + case OCFS2_LOCK_TYPE_DENTRY: + c = 'N'; + break; default: c = '\0'; } @@ -70,4 +76,23 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type) return c; } +static char *ocfs2_lock_type_strings[] = { + [OCFS2_LOCK_TYPE_META] = "Meta", + [OCFS2_LOCK_TYPE_DATA] = "Data", + [OCFS2_LOCK_TYPE_SUPER] = "Super", + [OCFS2_LOCK_TYPE_RENAME] = "Rename", + /* Need to differentiate from [R]ename.. serializing writes is the + * important job it does, anyway. */ + [OCFS2_LOCK_TYPE_RW] = "Write/Read", + [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", +}; + +static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type) +{ +#ifdef __KERNEL__ + mlog_bug_on_msg(type >= OCFS2_NUM_LOCK_TYPES, "%d\n", type); +#endif + return ocfs2_lock_type_strings[type]; +} + #endif /* OCFS2_LOCKID_H */ diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 871627961d6..aa6f5aadedc 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -264,7 +264,7 @@ int ocfs2_find_slot(struct ocfs2_super *osb) osb->slot_num = slot; spin_unlock(&si->si_lock); - mlog(ML_NOTICE, "taking node slot %d\n", osb->slot_num); + mlog(0, "taking node slot %d\n", osb->slot_num); status = ocfs2_update_disk_slots(osb, si); if (status < 0) diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 195523090c8..9d91e66f51a 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode, struct buffer_head *group_bh, u32 bits_wanted, u32 min_bits, u16 *bit_off, u16 *bits_found); -static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, - u32 bits_wanted, - u32 min_bits, - u16 *bit_off, - unsigned int *num_bits, - u64 *bg_blkno); static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, struct ocfs2_alloc_context *ac, u32 bits_wanted, @@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, u64 *bg_blkno); static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, int nr); -static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, - struct buffer_head *bg_bh, - unsigned int bits_wanted, - u16 *bit_off, - u16 *bits_found); static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, @@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); } +/* somewhat more expensive than our other checks, so use sparingly. 
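 *
 * (Editor's aside: "sparingly" because the function below runs a
 * battery of consistency checks per call. Call sites follow the
 * pattern the search_chain hunk uses further down:
 *
 *	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
 *	if (status) {
 *		mlog_errno(status);
 *		goto bail;
 *	}
 *
 * where a failure has already marked the filesystem read-only via
 * ocfs2_error().)
 *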
*/ +static int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd) +{ + unsigned int max_bits; + + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); + return -EIO; + } + + if (di->i_blkno != gd->bg_parent_dinode) { + ocfs2_error(sb, "Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + return -EIO; + } + + max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); + if (le16_to_cpu(gd->bg_bits) > max_bits) { + ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_chain) >= + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { + ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "claims that %u are free", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EIO; + } + + return 0; +} + static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, @@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, struct buffer_head *bg_bh, unsigned int bits_wanted, + unsigned int total_bits, u16 *bit_off, u16 *bits_found) { @@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, found = start = best_offset = best_size = 0; bitmap = bg->bg_bitmap; - while((offset = ocfs2_find_next_zero_bit(bitmap, - le16_to_cpu(bg->bg_bits), - start)) != -1) { - if (offset == le16_to_cpu(bg->bg_bits)) + while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { + if (offset == total_bits) break; if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { @@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode, { int search = -ENOSPC; int ret; - struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data; u16 tmp_off, tmp_found; + unsigned int max_bits, gd_cluster_off; BUG_ON(!ocfs2_is_cluster_bitmap(inode)); - if (bg->bg_free_bits_count) { + if (gd->bg_free_bits_count) { + max_bits = le16_to_cpu(gd->bg_bits); + + /* Tail groups in cluster bitmaps which aren't cpg + * aligned are prone to partial extension by a failed + * fs resize. If the file system resize never got to + * update the dinode cluster count, then we don't want + * to trust any clusters past it, regardless of what + * the group descriptor says. 
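The tail-group situation that comment describes boils down to a clamp against the dinode's cluster count. A standalone model with made-up numbers (the helper name and the figures are mine, not the kernel's):

        /* Never trust bitmap bits that lie past the dinode cluster count. */
        static unsigned int usable_bits(unsigned int gd_cluster_off,
                                        unsigned int bg_bits,
                                        unsigned int ip_clusters)
        {
                if (gd_cluster_off + bg_bits > ip_clusters)
                        return ip_clusters - gd_cluster_off; /* truncated tail */
                return bg_bits;
        }

        /* e.g. a tail group at cluster 98304 claiming 32768 bits on a volume
         * of 100000 clusters may only use 100000 - 98304 = 1696 of them. */
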
*/ + gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(gd->bg_blkno)); + if ((gd_cluster_off + max_bits) > + OCFS2_I(inode)->ip_clusters) { + max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; + mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + OCFS2_I(inode)->ip_clusters, max_bits); + } + ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + max_bits, &tmp_off, &tmp_found); if (ret) return ret; @@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode, if (bg->bg_free_bits_count) ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + le16_to_cpu(bg->bg_bits), bit_off, bits_found); return ret; } +static int ocfs2_alloc_dinode_update_counts(struct inode *inode, + struct ocfs2_journal_handle *handle, + struct buffer_head *di_bh, + u32 num_bits, + u16 chain) +{ + int ret; + u32 tmp_used; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; + struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); + di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); + le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 gd_blkno, + u16 *bits_left) +{ + int ret; + u16 found; + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *gd; + struct inode *alloc_inode = ac->ac_inode; + struct ocfs2_journal_handle *handle = ac->ac_handle; + + ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno, + &group_bh, OCFS2_BH_CACHED, alloc_inode); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + gd = (struct ocfs2_group_desc *) group_bh->b_data; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); + ret = -EIO; + goto out; + } + + ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, + bit_off, &found); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + *num_bits = found; + + ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, + *num_bits, + le16_to_cpu(gd->bg_chain)); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, + *bit_off, *num_bits); + if (ret < 0) + mlog_errno(ret); + + *bits_left = le16_to_cpu(gd->bg_free_bits_count); + +out: + brelse(group_bh); + + return ret; +} + static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, u32 bits_wanted, u32 min_bits, u16 *bit_off, unsigned int *num_bits, - u64 *bg_blkno) + u64 *bg_blkno, + u16 *bits_left) { int status; u16 chain, tmp_bits; @@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } @@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto 
bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } } @@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, (unsigned long long)fe->i_blkno); *bg_blkno = le64_to_cpu(bg->bg_blkno); + *bits_left = le16_to_cpu(bg->bg_free_bits_count); bail: if (group_bh) brelse(group_bh); @@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, { int status; u16 victim, i; + u16 bits_left = 0; + u64 hint_blkno = ac->ac_last_group; struct ocfs2_chain_list *cl; struct ocfs2_dinode *fe; @@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, goto bail; } + if (hint_blkno) { + /* Attempt to short-circuit the usual search mechanism + * by jumping straight to the most recently used + * allocation group. This helps us maintain some + * contiguousness across allocations. */ + status = ocfs2_search_one_group(ac, bits_wanted, min_bits, + bit_off, num_bits, + hint_blkno, &bits_left); + if (!status) { + /* Be careful to update *bg_blkno here as the + * caller is expecting it to be filled in, and + * ocfs2_search_one_group() won't do that for + * us. */ + *bg_blkno = hint_blkno; + goto set_hint; + } + if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + } + cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; victim = ocfs2_find_victim_chain(cl); @@ -1153,9 +1337,9 @@ ac->ac_allow_chain_relink = 1; status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, - num_bits, bg_blkno); + num_bits, bg_blkno, &bits_left); if (!status) - goto bail; + goto set_hint; if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; } @@ -1177,8 +1361,8 @@ ac->ac_chain = i; status = ocfs2_search_chain(ac, bits_wanted, min_bits, - bit_off, num_bits, - bg_blkno); + bit_off, num_bits, bg_blkno, + &bits_left); if (!status) break; if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; } } -bail: +set_hint: + if (status != -ENOSPC) { + /* If the next search of this group is not likely to + * yield a suitable extent, then we reset the last + * group hint so as not to waste a disk read */ + if (bits_left < min_bits) + ac->ac_last_group = 0; + else + ac->ac_last_group = *bg_blkno; + } + +bail: mlog_exit(status); return status; } @@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, { int status; unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; - u64 bg_blkno; + u64 bg_blkno = 0; u16 bg_bit_off; mlog_entry_void(); @@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, } group = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(group)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); + if (status) { + mlog_errno(status); goto bail; } BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a76c82a7cea..c787838d105 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -49,6 +49,8 @@ struct ocfs2_alloc_context { u16 
ac_chain; int ac_allow_chain_relink; group_search_t *ac_group_search; + + u64 ac_last_group; }; void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index cdf73393f09..4c29cd7cc8e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -68,13 +68,6 @@ #include "buffer_head_io.h" -/* - * Globals - */ -static spinlock_t ocfs2_globals_lock = SPIN_LOCK_UNLOCKED; - -static u32 osb_id; /* Keeps track of next available OSB Id */ - static kmem_cache_t *ocfs2_inode_cachep = NULL; kmem_cache_t *ocfs2_lock_cache = NULL; @@ -209,7 +202,7 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) mlog_entry_void(); - new = ocfs2_iget(osb, osb->root_blkno); + new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE); if (IS_ERR(new)) { status = PTR_ERR(new); mlog_errno(status); @@ -217,7 +210,7 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) } osb->root_inode = new; - new = ocfs2_iget(osb, osb->system_dir_blkno); + new = ocfs2_iget(osb, osb->system_dir_blkno, OCFS2_FI_FLAG_SYSFILE); if (IS_ERR(new)) { status = PTR_ERR(new); mlog_errno(status); @@ -642,10 +635,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) ocfs2_complete_mount_recovery(osb); - printk("ocfs2: Mounting device (%u,%u) on (node %d, slot %d) with %s " - "data mode.\n", - MAJOR(sb->s_dev), MINOR(sb->s_dev), osb->node_num, - osb->slot_num, + printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %d, slot %d) " + "with %s data mode.\n", + osb->dev_str, osb->node_num, osb->slot_num, osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : "ordered"); @@ -690,7 +682,7 @@ static struct file_system_type ocfs2_fs_type = { .kill_sb = kill_block_super, /* set to the generic one * right now, but do we * need to change that? 
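The ac_last_group field added to ocfs2_alloc_context above (and the la_last_gd field added to ocfs2_super earlier, which appears to be the same hint kept across local-alloc window moves) carries a most-recently-used allocation group. Its update policy, pulled out of ocfs2_claim_suballoc_bits() into a self-contained model with simplified types, not the kernel code itself:

        #include <errno.h>
        #include <stdint.h>

        struct hint { uint64_t last_group; };

        static void update_hint(struct hint *h, int status, uint64_t bg_blkno,
                                uint16_t bits_left, uint32_t min_bits)
        {
                if (status == -ENOSPC)
                        return;            /* nothing allocated; keep the hint */
                if (bits_left < min_bits)
                        h->last_group = 0; /* group nearly full: drop the hint
                                            * rather than waste a disk read on
                                            * it next time */
                else
                        h->last_group = bg_blkno;
        }

A claim then tries last_group first whenever it is non-zero and only falls back to the victim-chain scan on -ENOSPC, which is exactly the short-circuit the hunks above add.
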
*/ - .fs_flags = FS_REQUIRES_DEV, + .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, .next = NULL }; @@ -800,10 +792,6 @@ static int __init ocfs2_init(void) goto leave; } - spin_lock(&ocfs2_globals_lock); - osb_id = 0; - spin_unlock(&ocfs2_globals_lock); - ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); if (!ocfs2_debugfs_root) { status = -EFAULT; goto leave; } @@ -1020,7 +1008,7 @@ static int ocfs2_fill_local_node_info(struct ocfs2_super *osb) goto bail; } - mlog(ML_NOTICE, "I am node %d\n", osb->node_num); + mlog(0, "I am node %d\n", osb->node_num); status = 0; bail: @@ -1191,8 +1179,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) atomic_set(&osb->vol_state, VOLUME_DISMOUNTED); - printk("ocfs2: Unmounting device (%u,%u) on (node %d)\n", - MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev), osb->node_num); + printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %d)\n", + osb->dev_str, osb->node_num); ocfs2_delete_osb(osb); kfree(osb); @@ -1212,8 +1200,6 @@ static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uu if (osb->uuid_str == NULL) return -ENOMEM; - memcpy(osb->uuid, uuid, OCFS2_VOL_UUID_LEN); - for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) { /* print with null */ ret = snprintf(ptr, 3, "%02X", uuid[i]); @@ -1311,13 +1297,6 @@ static int ocfs2_initialize_super(struct super_block *sb, goto bail; } - osb->uuid = kmalloc(OCFS2_VOL_UUID_LEN, GFP_KERNEL); - if (!osb->uuid) { - mlog(ML_ERROR, "unable to alloc uuid\n"); - status = -ENOMEM; - goto bail; - } - di = (struct ocfs2_dinode *)bh->b_data; osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots); @@ -1327,7 +1306,7 @@ static int ocfs2_initialize_super(struct super_block *sb, status = -EINVAL; goto bail; } - mlog(ML_NOTICE, "max_slots for this device: %u\n", osb->max_slots); + mlog(0, "max_slots for this device: %u\n", osb->max_slots); init_waitqueue_head(&osb->osb_wipe_event); osb->osb_orphan_wipes = kcalloc(osb->max_slots, @@ -1418,7 +1397,7 @@ static int ocfs2_initialize_super(struct super_block *sb, goto bail; } - memcpy(&uuid_net_key, &osb->uuid[i], sizeof(osb->net_key)); + memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key)); osb->net_key = le32_to_cpu(uuid_net_key); strncpy(osb->vol_label, di->id2.i_super.s_label, 63); @@ -1463,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; + /* We don't have a cluster lock on the bitmap here because + * we're only interested in static information and the extra + * complexity at mount time isn't worth it. Don't pass the + * inode into the read function though as we don't want it to + * be put in the cache. */ status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, - inode); + NULL); iput(inode); if (status < 0) { mlog_errno(status); goto bail; } @@ -1473,7 +1457,6 @@ di = (struct ocfs2_dinode *) bitmap_bh->b_data; osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); - osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total); brelse(bitmap_bh); mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); @@ -1484,18 +1467,6 @@ static int ocfs2_initialize_super(struct super_block *sb, goto bail; } - /* Link this osb onto the global linked list of all osb structures. */ - /* The Global Link List is mainted for the whole driver . 
*/ - spin_lock(&ocfs2_globals_lock); - osb->osb_id = osb_id; - if (osb_id < OCFS2_MAX_OSB_ID) - osb_id++; - else { - mlog(ML_ERROR, "Too many volumes mounted\n"); - status = -ENOMEM; - } - spin_unlock(&ocfs2_globals_lock); - bail: mlog_exit(status); return status; diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 0c8a1294ec9..c0f68aa6c17 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -154,7 +154,7 @@ static void *ocfs2_follow_link(struct dentry *dentry, } status = vfs_follow_link(nd, link); - if (status) + if (status && status != -ENOENT) mlog_errno(status); bail: if (page) { diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c index fc29cb7a437..5df6e35d09b 100644 --- a/fs/ocfs2/sysfile.c +++ b/fs/ocfs2/sysfile.c @@ -28,11 +28,11 @@ #include <linux/slab.h> #include <linux/highmem.h> -#include "ocfs2.h" - #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> +#include "ocfs2.h" + #include "alloc.h" #include "dir.h" #include "inode.h" @@ -115,7 +115,7 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb, goto bail; } - inode = ocfs2_iget(osb, blkno); + inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE); if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); inode = NULL; diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index b8a00a79332..9707ed7a320 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -206,7 +206,10 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi, } /* Warning: even if it returns true, this does *not* guarantee that - * the block is stored in our inode metadata cache. */ + * the block is stored in our inode metadata cache. + * + * This can be called under lock_buffer() + */ int ocfs2_buffer_uptodate(struct inode *inode, struct buffer_head *bh) { @@ -226,6 +229,16 @@ int ocfs2_buffer_uptodate(struct inode *inode, return ocfs2_buffer_cached(OCFS2_I(inode), bh); } +/* + * Determine whether a buffer is currently out on a read-ahead request. + * ip_io_sem should be held to serialize submitters with the logic here. + */ +int ocfs2_buffer_read_ahead(struct inode *inode, + struct buffer_head *bh) +{ + return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh); +} + /* Requires ip_lock */ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, sector_t block) @@ -403,7 +416,11 @@ out_free: * * Note that this function may actually fail to insert the block if * memory cannot be allocated. This is not fatal however (but may - * result in a performance penalty) */ + * result in a performance penalty) + * + * Readahead buffers can be passed in here before the I/O request is + * completed. 
+ */ void ocfs2_set_buffer_uptodate(struct inode *inode, struct buffer_head *bh) { diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h index 01cd32d26b0..2e73206059a 100644 --- a/fs/ocfs2/uptodate.h +++ b/fs/ocfs2/uptodate.h @@ -40,5 +40,7 @@ void ocfs2_set_new_buffer_uptodate(struct inode *inode, struct buffer_head *bh); void ocfs2_remove_from_cache(struct inode *inode, struct buffer_head *bh); +int ocfs2_buffer_read_ahead(struct inode *inode, + struct buffer_head *bh); #endif /* OCFS2_UPTODATE_H */ diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c index ee42765a855..5b4dca79990 100644 --- a/fs/ocfs2/vote.c +++ b/fs/ocfs2/vote.c @@ -74,9 +74,6 @@ struct ocfs2_vote_msg __be32 v_orphaned_slot; /* Used during delete votes */ __be32 v_nlink; /* Used during unlink votes */ } md1; /* Message type dependant 1 */ - __be32 v_unlink_namelen; - __be64 v_unlink_parent; - u8 v_unlink_dirent[OCFS2_VOTE_FILENAME_LEN]; }; /* Responses are given these values to maintain backwards @@ -100,8 +97,6 @@ struct ocfs2_vote_work { enum ocfs2_vote_request { OCFS2_VOTE_REQ_INVALID = 0, OCFS2_VOTE_REQ_DELETE, - OCFS2_VOTE_REQ_UNLINK, - OCFS2_VOTE_REQ_RENAME, OCFS2_VOTE_REQ_MOUNT, OCFS2_VOTE_REQ_UMOUNT, OCFS2_VOTE_REQ_LAST @@ -261,103 +256,13 @@ done: return response; } -static int ocfs2_match_dentry(struct dentry *dentry, - u64 parent_blkno, - unsigned int namelen, - const char *name) -{ - struct inode *parent; - - if (!dentry->d_parent) { - mlog(0, "Detached from parent.\n"); - return 0; - } - - parent = dentry->d_parent->d_inode; - /* Negative parent dentry? */ - if (!parent) - return 0; - - /* Name is in a different directory. */ - if (OCFS2_I(parent)->ip_blkno != parent_blkno) - return 0; - - if (dentry->d_name.len != namelen) - return 0; - - /* comparison above guarantees this is safe. */ - if (memcmp(dentry->d_name.name, name, namelen)) - return 0; - - return 1; -} - -static void ocfs2_process_dentry_request(struct inode *inode, - int rename, - unsigned int new_nlink, - u64 parent_blkno, - unsigned int namelen, - const char *name) -{ - struct dentry *dentry = NULL; - struct list_head *p; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - - mlog(0, "parent %llu, namelen = %u, name = %.*s\n", - (unsigned long long)parent_blkno, namelen, namelen, name); - - spin_lock(&dcache_lock); - - /* Another node is removing this name from the system. It is - * up to us to find the corresponding dentry and if it exists, - * unhash it from the dcache. */ - list_for_each(p, &inode->i_dentry) { - dentry = list_entry(p, struct dentry, d_alias); - - if (ocfs2_match_dentry(dentry, parent_blkno, namelen, name)) { - mlog(0, "dentry found: %.*s\n", - dentry->d_name.len, dentry->d_name.name); - - dget_locked(dentry); - break; - } - - dentry = NULL; - } - - spin_unlock(&dcache_lock); - - if (dentry) { - d_delete(dentry); - dput(dentry); - } - - /* rename votes don't send link counts */ - if (!rename) { - mlog(0, "new_nlink = %u\n", new_nlink); - - /* We don't have the proper locks here to directly - * change i_nlink and besides, the vote is sent - * *before* the operation so it may have failed on the - * other node. This passes a hint to ocfs2_drop_inode - * to force ocfs2_delete_inode, who will take the - * proper cluster locks to sort things out. 
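Back in the uptodate.[ch] hunks above, the new ocfs2_buffer_read_ahead() helper pairs buffer_locked() with the metadata-cache check so a reader can tell "in flight from our own readahead" apart from "not cached at all". A hypothetical consumer (the real caller would live in the buffer read path, which this patch does not show), assuming ip_io_sem is held as the comment requires:

        /* hypothetical: bh was found locked; was that our readahead? */
        if (ocfs2_buffer_read_ahead(inode, bh)) {
                wait_on_buffer(bh);        /* let the readahead I/O finish */
                if (!buffer_uptodate(bh))
                        status = -EIO;     /* readahead failed; caller decides */
        }
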
*/ - if (new_nlink == 0) { - spin_lock(&oi->ip_lock); - oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; - spin_unlock(&OCFS2_I(inode)->ip_lock); - } - } -} - static void ocfs2_process_vote(struct ocfs2_super *osb, struct ocfs2_vote_msg *msg) { int net_status, vote_response; int orphaned_slot = 0; - int rename = 0; - unsigned int node_num, generation, new_nlink, namelen; - u64 blkno, parent_blkno; + unsigned int node_num, generation; + u64 blkno; enum ocfs2_vote_request request; struct inode *inode = NULL; struct ocfs2_msg_hdr *hdr = &msg->v_hdr; @@ -437,18 +342,6 @@ static void ocfs2_process_vote(struct ocfs2_super *osb, vote_response = ocfs2_process_delete_request(inode, &orphaned_slot); break; - case OCFS2_VOTE_REQ_RENAME: - rename = 1; - /* fall through */ - case OCFS2_VOTE_REQ_UNLINK: - parent_blkno = be64_to_cpu(msg->v_unlink_parent); - namelen = be32_to_cpu(msg->v_unlink_namelen); - /* new_nlink will be ignored in case of a rename vote */ - new_nlink = be32_to_cpu(msg->md1.v_nlink); - ocfs2_process_dentry_request(inode, rename, new_nlink, - parent_blkno, namelen, - msg->v_unlink_dirent); - break; default: mlog(ML_ERROR, "node %u, invalid request: %u\n", node_num, request); @@ -889,75 +782,6 @@ int ocfs2_request_delete_vote(struct inode *inode) return status; } -static void ocfs2_setup_unlink_vote(struct ocfs2_vote_msg *request, - struct dentry *dentry) -{ - struct inode *parent = dentry->d_parent->d_inode; - - /* We need some values which will uniquely identify a dentry - * on the other nodes so that they can find it and run - * d_delete against it. Parent directory block and full name - * should suffice. */ - - mlog(0, "unlink/rename request: parent: %llu name: %.*s\n", - (unsigned long long)OCFS2_I(parent)->ip_blkno, dentry->d_name.len, - dentry->d_name.name); - - request->v_unlink_parent = cpu_to_be64(OCFS2_I(parent)->ip_blkno); - request->v_unlink_namelen = cpu_to_be32(dentry->d_name.len); - memcpy(request->v_unlink_dirent, dentry->d_name.name, - dentry->d_name.len); -} - -int ocfs2_request_unlink_vote(struct inode *inode, - struct dentry *dentry, - unsigned int nlink) -{ - int status; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_vote_msg *request; - - if (dentry->d_name.len > OCFS2_VOTE_FILENAME_LEN) - return -ENAMETOOLONG; - - status = -ENOMEM; - request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno, - inode->i_generation, - OCFS2_VOTE_REQ_UNLINK, nlink); - if (request) { - ocfs2_setup_unlink_vote(request, dentry); - - status = ocfs2_request_vote(inode, request, NULL); - - kfree(request); - } - return status; -} - -int ocfs2_request_rename_vote(struct inode *inode, - struct dentry *dentry) -{ - int status; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_vote_msg *request; - - if (dentry->d_name.len > OCFS2_VOTE_FILENAME_LEN) - return -ENAMETOOLONG; - - status = -ENOMEM; - request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno, - inode->i_generation, - OCFS2_VOTE_REQ_RENAME, 0); - if (request) { - ocfs2_setup_unlink_vote(request, dentry); - - status = ocfs2_request_vote(inode, request, NULL); - - kfree(request); - } - return status; -} - int ocfs2_request_mount_vote(struct ocfs2_super *osb) { int status; @@ -988,9 +812,7 @@ int ocfs2_request_mount_vote(struct ocfs2_super *osb) } bail: - if (request) - kfree(request); - + kfree(request); return status; } @@ -1021,9 +843,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb) } bail: - if (request) - kfree(request); - + kfree(request); return status; } diff --git 
a/fs/ocfs2/vote.h b/fs/ocfs2/vote.h index 9cce6070346..53ebc1c69e5 100644 --- a/fs/ocfs2/vote.h +++ b/fs/ocfs2/vote.h @@ -39,11 +39,6 @@ static inline void ocfs2_kick_vote_thread(struct ocfs2_super *osb) } int ocfs2_request_delete_vote(struct inode *inode); -int ocfs2_request_unlink_vote(struct inode *inode, - struct dentry *dentry, - unsigned int nlink); -int ocfs2_request_rename_vote(struct inode *inode, - struct dentry *dentry); int ocfs2_request_mount_vote(struct ocfs2_super *osb); int ocfs2_request_umount_vote(struct ocfs2_super *osb); int ocfs2_register_net_handlers(struct ocfs2_super *osb);
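One small idiom worth noting from the vote.c cleanups above: the "if (request) kfree(request)" pairs collapse to a bare kfree(request) because kfree(NULL) is defined to be a no-op, so an error path can free unconditionally:

        /* pattern from the bail: labels above; request may be NULL when
         * ocfs2_new_vote_request() failed, and kfree(NULL) does nothing */
        bail:
                kfree(request);
                return status;
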