Diffstat (limited to 'fs')
-rw-r--r--  fs/debugfs/file.c  36
-rw-r--r--  fs/dlm/dlm_internal.h  1
-rw-r--r--  fs/dlm/lock.c  142
-rw-r--r--  fs/dlm/lock.h  3
-rw-r--r--  fs/dlm/lockspace.c  3
-rw-r--r--  fs/dlm/lowcomms.c  23
-rw-r--r--  fs/dlm/member.c  41
-rw-r--r--  fs/dlm/midcomms.c  17
-rw-r--r--  fs/dlm/rcom.c  36
-rw-r--r--  fs/dlm/rcom.h  5
-rw-r--r--  fs/dlm/recoverd.c  11
-rw-r--r--  fs/dlm/requestqueue.c  58
-rw-r--r--  fs/dlm/requestqueue.h  4
-rw-r--r--  fs/gfs2/bmap.c  35
-rw-r--r--  fs/gfs2/daemon.c  24
-rw-r--r--  fs/gfs2/daemon.h  1
-rw-r--r--  fs/gfs2/dir.c  3
-rw-r--r--  fs/gfs2/eaops.c  8
-rw-r--r--  fs/gfs2/eaops.h  4
-rw-r--r--  fs/gfs2/glock.c  293
-rw-r--r--  fs/gfs2/glock.h  5
-rw-r--r--  fs/gfs2/glops.c  24
-rw-r--r--  fs/gfs2/incore.h  31
-rw-r--r--  fs/gfs2/inode.c  78
-rw-r--r--  fs/gfs2/inode.h  3
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h  1
-rw-r--r--  fs/gfs2/locking/dlm/plock.c  11
-rw-r--r--  fs/gfs2/locking/dlm/sysfs.c  2
-rw-r--r--  fs/gfs2/locking/dlm/thread.c  20
-rw-r--r--  fs/gfs2/locking/nolock/main.c  1
-rw-r--r--  fs/gfs2/log.c  230
-rw-r--r--  fs/gfs2/log.h  2
-rw-r--r--  fs/gfs2/lops.c  470
-rw-r--r--  fs/gfs2/main.c  3
-rw-r--r--  fs/gfs2/meta_io.c  136
-rw-r--r--  fs/gfs2/meta_io.h  6
-rw-r--r--  fs/gfs2/mount.c  5
-rw-r--r--  fs/gfs2/ops_address.c  146
-rw-r--r--  fs/gfs2/ops_export.c  2
-rw-r--r--  fs/gfs2/ops_file.c  13
-rw-r--r--  fs/gfs2/ops_fstype.c  40
-rw-r--r--  fs/gfs2/ops_inode.c  38
-rw-r--r--  fs/gfs2/ops_super.c  14
-rw-r--r--  fs/gfs2/quota.c  13
-rw-r--r--  fs/gfs2/recovery.c  2
-rw-r--r--  fs/gfs2/rgrp.c  39
-rw-r--r--  fs/gfs2/super.c  1
-rw-r--r--  fs/gfs2/sys.c  4
-rw-r--r--  fs/gfs2/trans.c  22
-rw-r--r--  fs/gfs2/trans.h  2
-rw-r--r--  fs/ntfs/ChangeLog  12
-rw-r--r--  fs/ntfs/Makefile  2
-rw-r--r--  fs/ntfs/aops.c  22
-rw-r--r--  fs/ntfs/attrib.c  8
-rw-r--r--  fs/ntfs/file.c  36
-rw-r--r--  fs/ntfs/inode.c  3
-rw-r--r--  fs/ntfs/logfile.c  143
-rw-r--r--  fs/ntfs/runlist.c  4
-rw-r--r--  fs/ocfs2/alloc.c  482
-rw-r--r--  fs/ocfs2/alloc.h  7
-rw-r--r--  fs/ocfs2/aops.c  309
-rw-r--r--  fs/ocfs2/aops.h  6
-rw-r--r--  fs/ocfs2/cluster/masklog.c  3
-rw-r--r--  fs/ocfs2/dir.c  1423
-rw-r--r--  fs/ocfs2/dir.h  48
-rw-r--r--  fs/ocfs2/dlmglue.c  2
-rw-r--r--  fs/ocfs2/dlmglue.h  4
-rw-r--r--  fs/ocfs2/export.c  8
-rw-r--r--  fs/ocfs2/extent_map.c  6
-rw-r--r--  fs/ocfs2/file.c  298
-rw-r--r--  fs/ocfs2/file.h  2
-rw-r--r--  fs/ocfs2/inode.c  7
-rw-r--r--  fs/ocfs2/inode.h  1
-rw-r--r--  fs/ocfs2/journal.c  120
-rw-r--r--  fs/ocfs2/journal.h  3
-rw-r--r--  fs/ocfs2/namei.c  552
-rw-r--r--  fs/ocfs2/namei.h  19
-rw-r--r--  fs/ocfs2/ocfs2.h  7
-rw-r--r--  fs/ocfs2/ocfs2_fs.h  64
-rw-r--r--  fs/ocfs2/super.c  62
-rw-r--r--  fs/ocfs2/sysfile.c  10
-rw-r--r--  fs/partitions/check.c  12
-rw-r--r--  fs/sysfs/bin.c  36
-rw-r--r--  fs/sysfs/dir.c  754
-rw-r--r--  fs/sysfs/file.c  248
-rw-r--r--  fs/sysfs/group.c  2
-rw-r--r--  fs/sysfs/inode.c  103
-rw-r--r--  fs/sysfs/mount.c  26
-rw-r--r--  fs/sysfs/symlink.c  34
-rw-r--r--  fs/sysfs/sysfs.h  184
90 files changed, 4309 insertions, 2875 deletions
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 2e124e0075c..a9b99c0dc2e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -221,6 +221,42 @@ struct dentry *debugfs_create_u64(const char *name, mode_t mode,
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, "0x%04llx\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n");
+
+/**
+ * debugfs_create_x8 - create a debugfs file that is used to read and write an unsigned 8-bit value
+ * debugfs_create_x16 - create a debugfs file that is used to read and write an unsigned 16-bit value
+ * debugfs_create_x32 - create a debugfs file that is used to read and write an unsigned 32-bit value
+ *
+ * These functions are exactly the same as the above functions, (but use a hex
+ * output for the decimal challenged) for details look at the above unsigned
+ * decimal functions.
+ */
+struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+ struct dentry *parent, u8 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x8);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x8);
+
+struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+ struct dentry *parent, u16 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x16);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x16);
+
+struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+ struct dentry *parent, u32 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x32);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x32);
+
static ssize_t read_file_bool(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 74901e981e1..d2fc2384c3b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -491,6 +491,7 @@ struct dlm_ls {
uint64_t ls_recover_seq;
struct dlm_recover *ls_recover_args;
struct rw_semaphore ls_in_recovery; /* block local requests */
+ struct rw_semaphore ls_recv_active; /* block dlm_recv */
struct list_head ls_requestqueue;/* queue remote requests */
struct mutex ls_requestqueue_mutex;
char *ls_recover_buf;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 2082daf083d..3915b8e1414 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -3638,55 +3638,8 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
dlm_put_lkb(lkb);
}
-int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
+static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
{
- struct dlm_message *ms = (struct dlm_message *) hd;
- struct dlm_ls *ls;
- int error = 0;
-
- if (!recovery)
- dlm_message_in(ms);
-
- ls = dlm_find_lockspace_global(hd->h_lockspace);
- if (!ls) {
- log_print("drop message %d from %d for unknown lockspace %d",
- ms->m_type, nodeid, hd->h_lockspace);
- return -EINVAL;
- }
-
- /* recovery may have just ended leaving a bunch of backed-up requests
- in the requestqueue; wait while dlm_recoverd clears them */
-
- if (!recovery)
- dlm_wait_requestqueue(ls);
-
- /* recovery may have just started while there were a bunch of
- in-flight requests -- save them in requestqueue to be processed
- after recovery. we can't let dlm_recvd block on the recovery
- lock. if dlm_recoverd is calling this function to clear the
- requestqueue, it needs to be interrupted (-EINTR) if another
- recovery operation is starting. */
-
- while (1) {
- if (dlm_locking_stopped(ls)) {
- if (recovery) {
- error = -EINTR;
- goto out;
- }
- error = dlm_add_requestqueue(ls, nodeid, hd);
- if (error == -EAGAIN)
- continue;
- else {
- error = -EINTR;
- goto out;
- }
- }
-
- if (dlm_lock_recovery_try(ls))
- break;
- schedule();
- }
-
switch (ms->m_type) {
/* messages sent to a master node */
@@ -3761,17 +3714,90 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
log_error(ls, "unknown message type %d", ms->m_type);
}
- dlm_unlock_recovery(ls);
- out:
- dlm_put_lockspace(ls);
dlm_astd_wake();
- return error;
}
+/* If the lockspace is in recovery mode (locking stopped), then normal
+ messages are saved on the requestqueue for processing after recovery is
+ done. When not in recovery mode, we wait for dlm_recoverd to drain saved
+ messages off the requestqueue before we process new ones. This occurs right
+ after recovery completes when we transition from saving all messages on
+ requestqueue, to processing all the saved messages, to processing new
+ messages as they arrive. */
-/*
- * Recovery related
- */
+static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+ int nodeid)
+{
+ if (dlm_locking_stopped(ls)) {
+ dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms);
+ } else {
+ dlm_wait_requestqueue(ls);
+ _receive_message(ls, ms);
+ }
+}
+
+/* This is called by dlm_recoverd to process messages that were saved on
+ the requestqueue. */
+
+void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
+{
+ _receive_message(ls, ms);
+}
+
+/* This is called by the midcomms layer when something is received for
+ the lockspace. It could be either a MSG (normal message sent as part of
+ standard locking activity) or an RCOM (recovery message sent as part of
+ lockspace recovery). */
+
+void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
+{
+ struct dlm_message *ms = (struct dlm_message *) hd;
+ struct dlm_rcom *rc = (struct dlm_rcom *) hd;
+ struct dlm_ls *ls;
+ int type = 0;
+
+ switch (hd->h_cmd) {
+ case DLM_MSG:
+ dlm_message_in(ms);
+ type = ms->m_type;
+ break;
+ case DLM_RCOM:
+ dlm_rcom_in(rc);
+ type = rc->rc_type;
+ break;
+ default:
+ log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
+ return;
+ }
+
+ if (hd->h_nodeid != nodeid) {
+ log_print("invalid h_nodeid %d from %d lockspace %x",
+ hd->h_nodeid, nodeid, hd->h_lockspace);
+ return;
+ }
+
+ ls = dlm_find_lockspace_global(hd->h_lockspace);
+ if (!ls) {
+ log_print("invalid h_lockspace %x from %d cmd %d type %d",
+ hd->h_lockspace, nodeid, hd->h_cmd, type);
+
+ if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
+ dlm_send_ls_not_ready(nodeid, rc);
+ return;
+ }
+
+ /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
+ be inactive (in this ls) before transitioning to recovery mode */
+
+ down_read(&ls->ls_recv_active);
+ if (hd->h_cmd == DLM_MSG)
+ dlm_receive_message(ls, ms, nodeid);
+ else
+ dlm_receive_rcom(ls, rc, nodeid);
+ up_read(&ls->ls_recv_active);
+
+ dlm_put_lockspace(ls);
+}
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
@@ -4429,7 +4455,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (lvb_in && ua->lksb.sb_lvbptr)
memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
- ua->castparam = ua_tmp->castparam;
+ if (ua_tmp->castparam)
+ ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
@@ -4474,7 +4501,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
goto out;
ua = (struct dlm_user_args *)lkb->lkb_astparam;
- ua->castparam = ua_tmp->castparam;
+ if (ua_tmp->castparam)
+ ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 1720313c22d..ada04680a1e 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -16,7 +16,8 @@
void dlm_print_rsb(struct dlm_rsb *r);
void dlm_dump_rsb(struct dlm_rsb *r);
void dlm_print_lkb(struct dlm_lkb *lkb);
-int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
+void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
+void dlm_receive_buffer(struct dlm_header *hd, int nodeid);
int dlm_modes_compat(int mode1, int mode2);
int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
unsigned int flags, struct dlm_rsb **r_ret);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 1dc72105ab1..6353a838452 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -167,7 +167,6 @@ static struct kobj_type dlm_ktype = {
};
static struct kset dlm_kset = {
- .kobj = {.name = "dlm",},
.ktype = &dlm_ktype,
};
@@ -228,6 +227,7 @@ int dlm_lockspace_init(void)
INIT_LIST_HEAD(&lslist);
spin_lock_init(&lslist_lock);
+ kobject_set_name(&dlm_kset.kobj, "dlm");
kobj_set_kset_s(&dlm_kset, kernel_subsys);
error = kset_register(&dlm_kset);
if (error)
@@ -519,6 +519,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
ls->ls_recover_seq = 0;
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
+ init_rwsem(&ls->ls_recv_active);
INIT_LIST_HEAD(&ls->ls_requestqueue);
mutex_init(&ls->ls_requestqueue_mutex);
mutex_init(&ls->ls_clear_proc_locks);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 9e9d2e82f40..58bf3f5cdbe 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -334,18 +334,8 @@ static void close_connection(struct connection *con, bool and_other)
con->rx_page = NULL;
}
- /* If we are an 'othercon' then NULL the pointer to us
- from the parent and tidy ourself up */
- if (test_bit(CF_IS_OTHERCON, &con->flags)) {
- struct connection *parent = __nodeid2con(con->nodeid, 0);
- parent->othercon = NULL;
- kmem_cache_free(con_cache, con);
- }
- else {
- /* Parent connections get reused */
- con->retries = 0;
- mutex_unlock(&con->sock_mutex);
- }
+ con->retries = 0;
+ mutex_unlock(&con->sock_mutex);
}
/* We only send shutdown messages to nodes that are not part of the cluster */
@@ -731,6 +721,8 @@ static int tcp_accept_from_sock(struct connection *con)
INIT_WORK(&othercon->swork, process_send_sockets);
INIT_WORK(&othercon->rwork, process_recv_sockets);
set_bit(CF_IS_OTHERCON, &othercon->flags);
+ }
+ if (!othercon->sock) {
newcon->othercon = othercon;
othercon->sock = newsock;
newsock->sk->sk_user_data = othercon;
@@ -1272,14 +1264,15 @@ static void send_to_sock(struct connection *con)
if (len) {
ret = sendpage(con->sock, e->page, offset, len,
msg_flags);
- if (ret == -EAGAIN || ret == 0)
+ if (ret == -EAGAIN || ret == 0) {
+ cond_resched();
goto out;
+ }
if (ret <= 0)
goto send_error;
- } else {
+ }
/* Don't starve people filling buffers */
cond_resched();
- }
spin_lock(&con->writequeue_lock);
e->offset += ret;
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index d09977528f6..e9cdcab306e 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -18,10 +18,6 @@
#include "rcom.h"
#include "config.h"
-/*
- * Following called by dlm_recoverd thread
- */
-
static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
struct dlm_member *memb = NULL;
@@ -250,18 +246,30 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
return error;
}
-/*
- * Following called from lockspace.c
- */
+/* Userspace guarantees that dlm_ls_stop() has completed on all nodes before
+ dlm_ls_start() is called on any of them to start the new recovery. */
int dlm_ls_stop(struct dlm_ls *ls)
{
int new;
/*
- * A stop cancels any recovery that's in progress (see RECOVERY_STOP,
- * dlm_recovery_stopped()) and prevents any new locks from being
- * processed (see RUNNING, dlm_locking_stopped()).
+ * Prevent dlm_recv from being in the middle of something when we do
+ * the stop. This includes ensuring dlm_recv isn't processing a
+ * recovery message (rcom), while dlm_recoverd is aborting and
+ * resetting things from an in-progress recovery. i.e. we want
+ * dlm_recoverd to abort its recovery without worrying about dlm_recv
+ * processing an rcom at the same time. Stopping dlm_recv also makes
+ * it easy for dlm_receive_message() to check locking stopped and add a
+ * message to the requestqueue without races.
+ */
+
+ down_write(&ls->ls_recv_active);
+
+ /*
+ * Abort any recovery that's in progress (see RECOVERY_STOP,
+ * dlm_recovery_stopped()) and tell any other threads running in the
+ * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
*/
spin_lock(&ls->ls_recover_lock);
@@ -271,8 +279,14 @@ int dlm_ls_stop(struct dlm_ls *ls)
spin_unlock(&ls->ls_recover_lock);
/*
+ * Let dlm_recv run again, now any normal messages will be saved on the
+ * requestqueue for later.
+ */
+
+ up_write(&ls->ls_recv_active);
+
+ /*
* This in_recovery lock does two things:
- *
* 1) Keeps this function from returning until all threads are out
* of locking routines and locking is truely stopped.
* 2) Keeps any new requests from being processed until it's unlocked
@@ -284,9 +298,8 @@ int dlm_ls_stop(struct dlm_ls *ls)
/*
* The recoverd suspend/resume makes sure that dlm_recoverd (if
- * running) has noticed the clearing of RUNNING above and quit
- * processing the previous recovery. This will be true for all nodes
- * before any nodes start the new recovery.
+ * running) has noticed RECOVERY_STOP above and quit processing the
+ * previous recovery.
*/
dlm_recoverd_suspend(ls);
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index a5126e0c68a..f8c69dda16a 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -27,7 +27,6 @@
#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
-#include "rcom.h"
#include "lock.h"
#include "midcomms.h"
@@ -117,19 +116,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
offset &= (limit - 1);
len -= msglen;
- switch (msg->h_cmd) {
- case DLM_MSG:
- dlm_receive_message(msg, nodeid, 0);
- break;
-
- case DLM_RCOM:
- dlm_receive_rcom(msg, nodeid);
- break;
-
- default:
- log_print("unknown msg type %x from %u: %u %u %u %u",
- msg->h_cmd, nodeid, msglen, len, offset, ret);
- }
+ dlm_receive_buffer(msg, nodeid);
}
if (msg != (struct dlm_header *) __tmp)
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 188b91c027e..ae2fd97fa4a 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -386,7 +386,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
dlm_recover_process_copy(ls, rc_in);
}
-static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
+/* If the lockspace doesn't exist then still send a status message
+ back; it's possible that it just doesn't have its global_id yet. */
+
+int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct rcom_config *rf;
@@ -446,28 +449,11 @@ static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
return rv;
}
-/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
+/* Called by dlm_recv; corresponds to dlm_receive_message() but special
recovery-only comms are sent through here. */
-void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
+void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
- struct dlm_rcom *rc = (struct dlm_rcom *) hd;
- struct dlm_ls *ls;
-
- dlm_rcom_in(rc);
-
- /* If the lockspace doesn't exist then still send a status message
- back; it's possible that it just doesn't have its global_id yet. */
-
- ls = dlm_find_lockspace_global(hd->h_lockspace);
- if (!ls) {
- log_print("lockspace %x from %d type %x not found",
- hd->h_lockspace, nodeid, rc->rc_type);
- if (rc->rc_type == DLM_RCOM_STATUS)
- send_ls_not_ready(nodeid, rc);
- return;
- }
-
if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
log_debug(ls, "ignoring recovery message %x from %d",
rc->rc_type, nodeid);
@@ -477,12 +463,6 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
if (is_old_reply(ls, rc))
goto out;
- if (nodeid != rc->rc_header.h_nodeid) {
- log_error(ls, "bad rcom nodeid %d from %d",
- rc->rc_header.h_nodeid, nodeid);
- goto out;
- }
-
switch (rc->rc_type) {
case DLM_RCOM_STATUS:
receive_rcom_status(ls, rc);
@@ -520,6 +500,6 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
}
out:
- dlm_put_lockspace(ls);
+ return;
}
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index d7984321ff4..b09abd29ba3 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -18,7 +18,8 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
-void dlm_receive_rcom(struct dlm_header *hd, int nodeid);
+void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid);
+int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in);
#endif
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 66575997861..4b89e20eebe 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -24,19 +24,28 @@
/* If the start for which we're re-enabling locking (seq) has been superseded
- by a newer stop (ls_recover_seq), we need to leave locking disabled. */
+ by a newer stop (ls_recover_seq), we need to leave locking disabled.
+
+ We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
+ locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
+ enables locking and clears the requestqueue between a and b. */
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
+ down_write(&ls->ls_recv_active);
+
spin_lock(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
+ /* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
error = 0;
}
spin_unlock(&ls->ls_recover_lock);
+
+ up_write(&ls->ls_recv_active);
return error;
}
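
[Editor's note summarizing the dlm synchronization introduced across the lock.c, member.c and recoverd.c hunks above: dlm_recv takes ls_recv_active for read around every incoming message, while dlm_ls_stop() and enable_locking() take it for write, so the locking-stopped state can only change when no receive is in flight. A stripped-down, self-contained sketch of that pattern follows; the names are hypothetical and this is not the actual dlm code.]

#include <linux/rwsem.h>

static DECLARE_RWSEM(recv_active);	/* plays the role of ls_recv_active */
static int locking_stopped;

static void save_on_requestqueue(void *msg) { /* ... queue for after recovery ... */ }
static void process_message(void *msg)      { /* ... normal processing ... */ }

/* receive path: runs under the read side, so the stopped/running state
   it samples cannot flip between the check and the action */
static void receive_one(void *msg)
{
	down_read(&recv_active);
	if (locking_stopped)
		save_on_requestqueue(msg);
	else
		process_message(msg);
	up_read(&recv_active);
}

/* stop path: the write side only proceeds once every in-flight
   receive_one() has finished, mirroring dlm_ls_stop() */
static void stop_locking(void)
{
	down_write(&recv_active);
	locking_stopped = 1;
	up_write(&recv_active);
}
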
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 65008d79c96..0de04f17cce 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -20,7 +20,7 @@
struct rq_entry {
struct list_head list;
int nodeid;
- char request[1];
+ char request[0];
};
/*
@@ -30,42 +30,39 @@ struct rq_entry {
* lockspace is enabled on some while still suspended on others.
*/
-int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
{
struct rq_entry *e;
int length = hd->h_length;
- int rv = 0;
e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
if (!e) {
- log_print("dlm_add_requestqueue: out of memory\n");
- return 0;
+ log_print("dlm_add_requestqueue: out of memory len %d", length);
+ return;
}
e->nodeid = nodeid;
memcpy(e->request, hd, length);
- /* We need to check dlm_locking_stopped() after taking the mutex to
- avoid a race where dlm_recoverd enables locking and runs
- process_requestqueue between our earlier dlm_locking_stopped check
- and this addition to the requestqueue. */
-
mutex_lock(&ls->ls_requestqueue_mutex);
- if (dlm_locking_stopped(ls))
- list_add_tail(&e->list, &ls->ls_requestqueue);
- else {
- log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
- kfree(e);
- rv = -EAGAIN;
- }
+ list_add_tail(&e->list, &ls->ls_requestqueue);
mutex_unlock(&ls->ls_requestqueue_mutex);
- return rv;
}
+/*
+ * Called by dlm_recoverd to process normal messages saved while recovery was
+ * happening. Normal locking has been enabled before this is called. dlm_recv
+ * upon receiving a message, will wait for all saved messages to be drained
+ * here before processing the message it got. If a new dlm_ls_stop() arrives
+ * while we're processing these saved messages, it may block trying to suspend
+ * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that
+ * case, we don't abort since locking_stopped is still 0. If dlm_recv is not
+ * waiting for us, then this processing may be aborted due to locking_stopped.
+ */
+
int dlm_process_requestqueue(struct dlm_ls *ls)
{
struct rq_entry *e;
- struct dlm_header *hd;
int error = 0;
mutex_lock(&ls->ls_requestqueue_mutex);
@@ -79,14 +76,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
mutex_unlock(&ls->ls_requestqueue_mutex);
- hd = (struct dlm_header *) e->request;
- error = dlm_receive_message(hd, e->nodeid, 1);
-
- if (error == -EINTR) {
- /* entry is left on requestqueue */
- log_debug(ls, "process_requestqueue abort eintr");
- break;
- }
+ dlm_receive_message_saved(ls, (struct dlm_message *)e->request);
mutex_lock(&ls->ls_requestqueue_mutex);
list_del(&e->list);
@@ -106,10 +96,12 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
/*
* After recovery is done, locking is resumed and dlm_recoverd takes all the
- * saved requests and processes them as they would have been by dlm_recvd. At
- * the same time, dlm_recvd will start receiving new requests from remote
- * nodes. We want to delay dlm_recvd processing new requests until
- * dlm_recoverd has finished processing the old saved requests.
+ * saved requests and processes them as they would have been by dlm_recv. At
+ * the same time, dlm_recv will start receiving new requests from remote nodes.
+ * We want to delay dlm_recv processing new requests until dlm_recoverd has
+ * finished processing the old saved requests. We don't check for locking
+ * stopped here because dlm_ls_stop won't stop locking until it's suspended us
+ * (dlm_recv).
*/
void dlm_wait_requestqueue(struct dlm_ls *ls)
@@ -118,8 +110,6 @@ void dlm_wait_requestqueue(struct dlm_ls *ls)
mutex_lock(&ls->ls_requestqueue_mutex);
if (list_empty(&ls->ls_requestqueue))
break;
- if (dlm_locking_stopped(ls))
- break;
mutex_unlock(&ls->ls_requestqueue_mutex);
schedule();
}
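
[Editor's note on the requestqueue.c hunk above: changing request[1] to request[0] turns the saved message into a zero-length trailing array (the pre-C99 kernel idiom), so kmalloc(sizeof(struct rq_entry) + length) allocates exactly the header plus the message with no off-by-one slack. A generic sketch of that allocation idiom, with invented names rather than the dlm structure:]

#include <linux/slab.h>
#include <linux/string.h>

struct blob_entry {
	int owner;
	char data[0];		/* zero-length trailing array; payload starts here */
};

static struct blob_entry *blob_entry_alloc(int owner, const void *src, size_t len)
{
	/* header and payload in one allocation, as dlm_add_requestqueue() does */
	struct blob_entry *e = kmalloc(sizeof(*e) + len, GFP_KERNEL);

	if (!e)
		return NULL;
	e->owner = owner;
	memcpy(e->data, src, len);
	return e;
}
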
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 6a53ea03335..aba34fc05ee 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,7 +13,7 @@
#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__
-int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
int dlm_process_requestqueue(struct dlm_ls *ls);
void dlm_wait_requestqueue(struct dlm_ls *ls);
void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index cd805a66880..93fa427bb5f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -93,9 +93,10 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
map_bh(bh, inode->i_sb, block);
set_buffer_uptodate(bh);
+ if (!gfs2_is_jdata(ip))
+ mark_buffer_dirty(bh);
if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
gfs2_trans_add_bh(ip->i_gl, bh, 0);
- mark_buffer_dirty(bh);
if (release) {
unlock_page(page);
@@ -1085,6 +1086,33 @@ static int do_shrink(struct gfs2_inode *ip, u64 size)
return error;
}
+static int do_touch(struct gfs2_inode *ip, u64 size)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct buffer_head *dibh;
+ int error;
+
+ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ if (error)
+ return error;
+
+ down_write(&ip->i_rw_mutex);
+
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ goto do_touch_out;
+
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+
+do_touch_out:
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
+ return error;
+}
+
/**
* gfs2_truncatei - make a file a given size
* @ip: the inode
@@ -1105,8 +1133,11 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
if (size > ip->i_di.di_size)
error = do_grow(ip, size);
- else
+ else if (size < ip->i_di.di_size)
error = do_shrink(ip, size);
+ else
+ /* update time stamps */
+ error = do_touch(ip, size);
return error;
}
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
index 3548d9f31e0..3731ab0771d 100644
--- a/fs/gfs2/daemon.c
+++ b/fs/gfs2/daemon.c
@@ -35,30 +35,6 @@
The kthread functions used to start these daemons block and flush signals. */
/**
- * gfs2_scand - Look for cached glocks and inodes to toss from memory
- * @sdp: Pointer to GFS2 superblock
- *
- * One of these daemons runs, finding candidates to add to sd_reclaim_list.
- * See gfs2_glockd()
- */
-
-int gfs2_scand(void *data)
-{
- struct gfs2_sbd *sdp = data;
- unsigned long t;
-
- while (!kthread_should_stop()) {
- gfs2_scand_internal(sdp);
- t = gfs2_tune_get(sdp, gt_scand_secs) * HZ;
- if (freezing(current))
- refrigerator();
- schedule_timeout_interruptible(t);
- }
-
- return 0;
-}
-
-/**
* gfs2_glockd - Reclaim unused glock structures
* @sdp: Pointer to GFS2 superblock
*
diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h
index 801007120fb..0de9b355795 100644
--- a/fs/gfs2/daemon.h
+++ b/fs/gfs2/daemon.h
@@ -10,7 +10,6 @@
#ifndef __DAEMON_DOT_H__
#define __DAEMON_DOT_H__
-int gfs2_scand(void *data);
int gfs2_glockd(void *data);
int gfs2_recoverd(void *data);
int gfs2_logd(void *data);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 2beb2f401aa..9949bb746a5 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1043,6 +1043,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
error = gfs2_meta_inode_buffer(dip, &dibh);
if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
+ gfs2_trans_add_bh(dip->i_gl, dibh, 1);
dip->i_di.di_blocks++;
gfs2_set_inode_blocks(&dip->i_inode);
gfs2_dinode_out(dip, dibh->b_data);
@@ -1501,7 +1502,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino));
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index 1ab3e9d7388..aa8dbf303f6 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -200,28 +200,28 @@ static int security_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
return gfs2_ea_remove_i(ip, er);
}
-static struct gfs2_eattr_operations gfs2_user_eaops = {
+static const struct gfs2_eattr_operations gfs2_user_eaops = {
.eo_get = user_eo_get,
.eo_set = user_eo_set,
.eo_remove = user_eo_remove,
.eo_name = "user",
};
-struct gfs2_eattr_operations gfs2_system_eaops = {
+const struct gfs2_eattr_operations gfs2_system_eaops = {
.eo_get = system_eo_get,
.eo_set = system_eo_set,
.eo_remove = system_eo_remove,
.eo_name = "system",
};
-static struct gfs2_eattr_operations gfs2_security_eaops = {
+static const struct gfs2_eattr_operations gfs2_security_eaops = {
.eo_get = security_eo_get,
.eo_set = security_eo_set,
.eo_remove = security_eo_remove,
.eo_name = "security",
};
-struct gfs2_eattr_operations *gfs2_ea_ops[] = {
+const struct gfs2_eattr_operations *gfs2_ea_ops[] = {
NULL,
&gfs2_user_eaops,
&gfs2_system_eaops,
diff --git a/fs/gfs2/eaops.h b/fs/gfs2/eaops.h
index 508b4f7a244..da2f7fbbb40 100644
--- a/fs/gfs2/eaops.h
+++ b/fs/gfs2/eaops.h
@@ -22,9 +22,9 @@ struct gfs2_eattr_operations {
unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name);
-extern struct gfs2_eattr_operations gfs2_system_eaops;
+extern const struct gfs2_eattr_operations gfs2_system_eaops;
-extern struct gfs2_eattr_operations *gfs2_ea_ops[];
+extern const struct gfs2_eattr_operations *gfs2_ea_ops[];
#endif /* __EAOPS_DOT_H__ */
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3f0974e1afe..a37efe4aae6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -25,8 +25,10 @@
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
#include "gfs2.h"
#include "incore.h"
@@ -48,7 +50,6 @@ struct glock_iter {
int hash; /* hash bucket index */
struct gfs2_sbd *sdp; /* incore superblock */
struct gfs2_glock *gl; /* current glock struct */
- struct hlist_head *hb_list; /* current hash bucket ptr */
struct seq_file *seq; /* sequence file for debugfs */
char string[512]; /* scratch space */
};
@@ -59,8 +60,13 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
+static void run_queue(struct gfs2_glock *gl);
+
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
+static struct task_struct *scand_process;
+static unsigned int scand_secs = 5;
+static struct workqueue_struct *glock_workqueue;
#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -276,6 +282,18 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
return gl;
}
+static void glock_work_func(struct work_struct *work)
+{
+ struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+
+ spin_lock(&gl->gl_spin);
+ if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
+ set_bit(GLF_DEMOTE, &gl->gl_flags);
+ run_queue(gl);
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_put(gl);
+}
+
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
@@ -315,6 +333,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
gl->gl_state = LM_ST_UNLOCKED;
+ gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_hash = hash;
gl->gl_owner_pid = 0;
gl->gl_ip = 0;
@@ -323,10 +342,12 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_req_bh = NULL;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
+ gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
gl->gl_aspace = NULL;
lops_init_le(&gl->gl_le, &gfs2_glock_lops);
+ INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
/* If this glock protects actual on-disk data or metadata blocks,
create a VFS inode to manage the pages/buffers holding them. */
@@ -440,6 +461,8 @@ static void wait_on_holder(struct gfs2_holder *gh)
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
+ BUG_ON(!spin_is_locked(&gl->gl_spin));
+ gl->gl_demote_state = LM_ST_EXCLUSIVE;
clear_bit(GLF_DEMOTE, &gl->gl_flags);
smp_mb__after_clear_bit();
wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
@@ -545,12 +568,14 @@ static int rq_demote(struct gfs2_glock *gl)
return 0;
}
set_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
if (gl->gl_demote_state == LM_ST_UNLOCKED ||
- gl->gl_state != LM_ST_EXCLUSIVE)
+ gl->gl_state != LM_ST_EXCLUSIVE) {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_drop_th(gl);
- else
+ } else {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_xmote_th(gl, NULL);
+ }
spin_lock(&gl->gl_spin);
return 0;
@@ -679,24 +704,25 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
* practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
+static void handle_callback(struct gfs2_glock *gl, unsigned int state,
+ int remote, unsigned long delay)
{
+ int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
+
spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
+ set_bit(bit, &gl->gl_flags);
+ if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
gl->gl_object) {
- struct inode *inode = igrab(gl->gl_object);
+ gfs2_glock_schedule_for_reclaim(gl);
spin_unlock(&gl->gl_spin);
- if (inode) {
- d_prune_aliases(inode);
- iput(inode);
- }
return;
}
- } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
- gl->gl_demote_state = state;
+ } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state != state) {
+ gl->gl_demote_state = LM_ST_UNLOCKED;
}
spin_unlock(&gl->gl_spin);
}
@@ -723,6 +749,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
}
gl->gl_state = new_state;
+ gl->gl_tchange = jiffies;
}
/**
@@ -760,10 +787,20 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
if (!gh) {
gl->gl_stamp = jiffies;
- if (ret & LM_OUT_CANCELED)
+ if (ret & LM_OUT_CANCELED) {
op_done = 0;
- else
+ } else {
+ spin_lock(&gl->gl_spin);
+ if (gl->gl_state != gl->gl_demote_state) {
+ gl->gl_req_bh = NULL;
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_drop_th(gl);
+ gfs2_glock_put(gl);
+ return;
+ }
gfs2_demote_wake(gl);
+ spin_unlock(&gl->gl_spin);
+ }
} else {
spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
@@ -799,7 +836,6 @@ out:
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
}
@@ -817,7 +853,7 @@ out:
*
*/
-void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
+static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
int flags = gh ? gh->gh_flags : 0;
@@ -871,7 +907,6 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
gfs2_assert_warn(sdp, !ret);
state_change(gl, LM_ST_UNLOCKED);
- gfs2_demote_wake(gl);
if (glops->go_inval)
glops->go_inval(gl, DIO_METADATA);
@@ -884,10 +919,10 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
}
spin_lock(&gl->gl_spin);
+ gfs2_demote_wake(gl);
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
@@ -1067,24 +1102,31 @@ static void add_to_queue(struct gfs2_holder *gh)
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
BUG();
- existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
- printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- gl->gl_name.ln_type, gl->gl_state);
- BUG();
- }
-
- existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- BUG();
+ if (!(gh->gh_flags & GL_FLOCK)) {
+ existing = find_holder_by_owner(&gl->gl_holders,
+ gh->gh_owner_pid);
+ if (existing) {
+ print_symbol(KERN_WARNING "original: %s\n",
+ existing->gh_ip);
+ printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
+ existing->gh_gl->gl_name.ln_type,
+ existing->gh_gl->gl_state);
+ print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+ printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
+ gl->gl_name.ln_type, gl->gl_state);
+ BUG();
+ }
+
+ existing = find_holder_by_owner(&gl->gl_waiters3,
+ gh->gh_owner_pid);
+ if (existing) {
+ print_symbol(KERN_WARNING "original: %s\n",
+ existing->gh_ip);
+ print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+ BUG();
+ }
}
if (gh->gh_flags & LM_FLAG_PRIORITY)
@@ -1195,9 +1237,10 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
+ unsigned delay = 0;
if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_lock(gl);
@@ -1215,8 +1258,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
}
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
+
+ gfs2_glock_hold(gl);
+ if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE, &gl->gl_flags))
+ delay = gl->gl_ops->go_min_hold_time;
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
}
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
@@ -1443,18 +1492,21 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
unsigned int state)
{
struct gfs2_glock *gl;
+ unsigned long delay = 0;
+ unsigned long holdtime;
+ unsigned long now = jiffies;
gl = gfs2_glock_find(sdp, name);
if (!gl)
return;
- handle_callback(gl, state, 1);
-
- spin_lock(&gl->gl_spin);
- run_queue(gl);
- spin_unlock(&gl->gl_spin);
+ holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
- gfs2_glock_put(gl);
+ handle_callback(gl, state, 1, delay);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
}
/**
@@ -1495,7 +1547,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
return;
if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
gl->gl_req_bh(gl, async->lc_ret);
- gfs2_glock_put(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
up_read(&gfs2_umount_flush_sem);
return;
}
@@ -1588,7 +1641,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_unlock(gl);
}
@@ -1617,7 +1670,7 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
goto out;
gl = list_entry(head->first, struct gfs2_glock, gl_list);
while(1) {
- if (gl->gl_sbd == sdp) {
+ if (!sdp || gl->gl_sbd == sdp) {
gfs2_glock_hold(gl);
read_unlock(gl_lock_addr(hash));
if (prev)
@@ -1635,6 +1688,7 @@ out:
read_unlock(gl_lock_addr(hash));
if (prev)
gfs2_glock_put(prev);
+ cond_resched();
return has_entries;
}
@@ -1663,20 +1717,6 @@ out_schedule:
}
/**
- * gfs2_scand_internal - Look for glocks and inodes to toss from memory
- * @sdp: the filesystem
- *
- */
-
-void gfs2_scand_internal(struct gfs2_sbd *sdp)
-{
- unsigned int x;
-
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- examine_bucket(scan_glock, sdp, x);
-}
-
-/**
* clear_glock - look at a glock and see if we can free it from glock cache
* @gl: the glock to look at
*
@@ -1701,7 +1741,7 @@ static void clear_glock(struct gfs2_glock *gl)
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_unlock(gl);
}
}
@@ -1843,7 +1883,7 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
spin_lock(&gl->gl_spin);
- print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
+ print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number);
print_dbg(gi, " gl_flags =");
for (x = 0; x < 32; x++) {
@@ -1963,6 +2003,35 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
return error;
}
+/**
+ * gfs2_scand - Look for cached glocks and inodes to toss from memory
+ * @sdp: Pointer to GFS2 superblock
+ *
+ * One of these daemons runs, finding candidates to add to sd_reclaim_list.
+ * See gfs2_glockd()
+ */
+
+static int gfs2_scand(void *data)
+{
+ unsigned x;
+ unsigned delay;
+
+ while (!kthread_should_stop()) {
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(scan_glock, NULL, x);
+ if (freezing(current))
+ refrigerator();
+ delay = scand_secs;
+ if (delay < 1)
+ delay = 1;
+ schedule_timeout_interruptible(delay * HZ);
+ }
+
+ return 0;
+}
+
+
+
int __init gfs2_glock_init(void)
{
unsigned i;
@@ -1974,52 +2043,69 @@ int __init gfs2_glock_init(void)
rwlock_init(&gl_hash_locks[i]);
}
#endif
+
+ scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
+ if (IS_ERR(scand_process))
+ return PTR_ERR(scand_process);
+
+ glock_workqueue = create_workqueue("glock_workqueue");
+ if (IS_ERR(glock_workqueue)) {
+ kthread_stop(scand_process);
+ return PTR_ERR(glock_workqueue);
+ }
+
return 0;
}
+void gfs2_glock_exit(void)
+{
+ destroy_workqueue(glock_workqueue);
+ kthread_stop(scand_process);
+}
+
+module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
+
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
+ struct gfs2_glock *gl;
+
+restart:
read_lock(gl_lock_addr(gi->hash));
- while (1) {
- if (!gi->hb_list) { /* If we don't have a hash bucket yet */
- gi->hb_list = &gl_hash_table[gi->hash].hb_list;
- if (hlist_empty(gi->hb_list)) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- if (gi->hash >= GFS2_GL_HASH_SIZE) {
- read_unlock(gl_lock_addr(gi->hash));
- return 1;
- }
- else
- continue;
- }
- if (!hlist_empty(gi->hb_list)) {
- gi->gl = list_entry(gi->hb_list->first,
- struct gfs2_glock,
- gl_list);
- }
- } else {
- if (gi->gl->gl_list.next == NULL) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- continue;
- }
- gi->gl = list_entry(gi->gl->gl_list.next,
- struct gfs2_glock, gl_list);
- }
+ gl = gi->gl;
+ if (gl) {
+ gi->gl = hlist_entry(gl->gl_list.next,
+ struct gfs2_glock, gl_list);
if (gi->gl)
- break;
+ gfs2_glock_hold(gi->gl);
}
read_unlock(gl_lock_addr(gi->hash));
+ if (gl)
+ gfs2_glock_put(gl);
+ if (gl && gi->gl == NULL)
+ gi->hash++;
+ while(gi->gl == NULL) {
+ if (gi->hash >= GFS2_GL_HASH_SIZE)
+ return 1;
+ read_lock(gl_lock_addr(gi->hash));
+ gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
+ struct gfs2_glock, gl_list);
+ if (gi->gl)
+ gfs2_glock_hold(gi->gl);
+ read_unlock(gl_lock_addr(gi->hash));
+ gi->hash++;
+ }
+
+ if (gi->sdp != gi->gl->gl_sbd)
+ goto restart;
+
return 0;
}
static void gfs2_glock_iter_free(struct glock_iter *gi)
{
+ if (gi->gl)
+ gfs2_glock_put(gi->gl);
kfree(gi);
}
@@ -2033,9 +2119,8 @@ static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
gi->sdp = sdp;
gi->hash = 0;
- gi->gl = NULL;
- gi->hb_list = NULL;
gi->seq = NULL;
+ gi->gl = NULL;
memset(gi->string, 0, sizeof(gi->string));
if (gfs2_glock_iter_next(gi)) {
@@ -2055,7 +2140,7 @@ static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
if (!gi)
return NULL;
- while (n--) {
+ while(n--) {
if (gfs2_glock_iter_next(gi)) {
gfs2_glock_iter_free(gi);
return NULL;
@@ -2082,7 +2167,9 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
- /* nothing for now */
+ struct glock_iter *gi = iter_ptr;
+ if (gi)
+ gfs2_glock_iter_free(gi);
}
static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
@@ -2095,7 +2182,7 @@ static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static struct seq_operations gfs2_glock_seq_ops = {
+static const struct seq_operations gfs2_glock_seq_ops = {
.start = gfs2_glock_seq_start,
.next = gfs2_glock_seq_next,
.stop = gfs2_glock_seq_stop,
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 7721ca3fff9..b16f604eea9 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -26,6 +26,7 @@
#define GL_SKIP 0x00000100
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400
+#define GL_FLOCK 0x00000800
#define GL_NOCANCEL 0x00001000
#define GLR_TRYFAILED 13
@@ -132,11 +133,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
-
-void gfs2_scand_internal(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
int __init gfs2_glock_init(void);
+void gfs2_glock_exit(void);
+
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
int gfs2_register_debugfs(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 777ca46010e..4670dcb2a87 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -41,7 +41,6 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
struct list_head *head = &gl->gl_ail_list;
struct gfs2_bufdata *bd;
struct buffer_head *bh;
- u64 blkno;
int error;
blocks = atomic_read(&gl->gl_ail_count);
@@ -57,19 +56,12 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
bd = list_entry(head->next, struct gfs2_bufdata,
bd_ail_gl_list);
bh = bd->bd_bh;
- blkno = bh->b_blocknr;
+ gfs2_remove_from_ail(NULL, bd);
+ bd->bd_bh = NULL;
+ bh->b_private = NULL;
+ bd->bd_blkno = bh->b_blocknr;
gfs2_assert_withdraw(sdp, !buffer_busy(bh));
-
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&gl->gl_ail_count);
- brelse(bh);
- gfs2_log_unlock(sdp);
-
- gfs2_trans_add_revoke(sdp, blkno);
-
- gfs2_log_lock(sdp);
+ gfs2_trans_add_revoke(sdp, bd);
}
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
gfs2_log_unlock(sdp);
@@ -156,9 +148,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
ip = NULL;
if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
- if (ip)
+ if (ip && !gfs2_is_jdata(ip))
filemap_fdatawrite(ip->i_inode.i_mapping);
gfs2_log_flush(gl->gl_sbd, gl);
+ if (ip && gfs2_is_jdata(ip))
+ filemap_fdatawrite(ip->i_inode.i_mapping);
gfs2_meta_sync(gl);
if (ip) {
struct address_space *mapping = ip->i_inode.i_mapping;
@@ -452,6 +446,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_lock = inode_go_lock,
.go_unlock = inode_go_unlock,
.go_type = LM_TYPE_INODE,
+ .go_min_hold_time = HZ / 10,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -462,6 +457,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
.go_type = LM_TYPE_RGRP,
+ .go_min_hold_time = HZ / 10,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 170ba93829c..eaddfb5a8e6 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -11,6 +11,7 @@
#define __INCORE_DOT_H__
#include <linux/fs.h>
+#include <linux/workqueue.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -113,7 +114,13 @@ struct gfs2_bufdata {
struct buffer_head *bd_bh;
struct gfs2_glock *bd_gl;
- struct list_head bd_list_tr;
+ union {
+ struct list_head list_tr;
+ u64 blkno;
+ } u;
+#define bd_list_tr u.list_tr
+#define bd_blkno u.blkno
+
struct gfs2_log_element bd_le;
struct gfs2_ail *bd_ail;
@@ -130,6 +137,7 @@ struct gfs2_glock_operations {
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
const int go_type;
+ const unsigned long go_min_hold_time;
};
enum {
@@ -161,6 +169,7 @@ enum {
GLF_LOCK = 1,
GLF_STICKY = 2,
GLF_DEMOTE = 3,
+ GLF_PENDING_DEMOTE = 4,
GLF_DIRTY = 5,
};
@@ -193,6 +202,7 @@ struct gfs2_glock {
u64 gl_vn;
unsigned long gl_stamp;
+ unsigned long gl_tchange;
void *gl_object;
struct list_head gl_reclaim;
@@ -203,6 +213,7 @@ struct gfs2_glock {
struct gfs2_log_element gl_le;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
+ struct delayed_work gl_work;
};
struct gfs2_alloc {
@@ -293,11 +304,6 @@ struct gfs2_file {
struct gfs2_holder f_fl_gh;
};
-struct gfs2_revoke {
- struct gfs2_log_element rv_le;
- u64 rv_blkno;
-};
-
struct gfs2_revoke_replay {
struct list_head rr_list;
u64 rr_blkno;
@@ -335,12 +341,6 @@ struct gfs2_quota_data {
unsigned long qd_last_touched;
};
-struct gfs2_log_buf {
- struct list_head lb_list;
- struct buffer_head *lb_bh;
- struct buffer_head *lb_real;
-};
-
struct gfs2_trans {
unsigned long tr_ip;
@@ -429,7 +429,6 @@ struct gfs2_tune {
unsigned int gt_log_flush_secs;
unsigned int gt_jindex_refresh_secs; /* Check for new journal index */
- unsigned int gt_scand_secs;
unsigned int gt_recoverd_secs;
unsigned int gt_logd_secs;
unsigned int gt_quotad_secs;
@@ -574,7 +573,6 @@ struct gfs2_sbd {
/* Daemon stuff */
- struct task_struct *sd_scand_process;
struct task_struct *sd_recoverd_process;
struct task_struct *sd_logd_process;
struct task_struct *sd_quotad_process;
@@ -609,13 +607,13 @@ struct gfs2_sbd {
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
unsigned int sd_log_num_databuf;
- unsigned int sd_log_num_jdata;
struct list_head sd_log_le_gl;
struct list_head sd_log_le_buf;
struct list_head sd_log_le_revoke;
struct list_head sd_log_le_rg;
struct list_head sd_log_le_databuf;
+ struct list_head sd_log_le_ordered;
unsigned int sd_log_blks_free;
struct mutex sd_log_reserve_mutex;
@@ -627,7 +625,8 @@ struct gfs2_sbd {
unsigned long sd_log_flush_time;
struct rw_semaphore sd_log_flush_lock;
- struct list_head sd_log_flush_list;
+ atomic_t sd_log_in_flight;
+ wait_queue_head_t sd_log_flush_wait;
unsigned int sd_log_flush_head;
u64 sd_log_flush_wrapped;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 34f7bcdea1e..5f6dc32946c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -77,6 +77,49 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}
+struct gfs2_skip_data {
+ u64 no_addr;
+ int skipped;
+};
+
+static int iget_skip_test(struct inode *inode, void *opaque)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_skip_data *data = opaque;
+
+ if (ip->i_no_addr == data->no_addr && inode->i_private != NULL){
+ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
+ data->skipped = 1;
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int iget_skip_set(struct inode *inode, void *opaque)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_skip_data *data = opaque;
+
+ if (data->skipped)
+ return 1;
+ inode->i_ino = (unsigned long)(data->no_addr);
+ ip->i_no_addr = data->no_addr;
+ return 0;
+}
+
+static struct inode *gfs2_iget_skip(struct super_block *sb,
+ u64 no_addr)
+{
+ struct gfs2_skip_data data;
+ unsigned long hash = (unsigned long)no_addr;
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
+}
+
/**
* GFS2 lookup code fills in vfs inode contents based on info obtained
* from directory entry inside gfs2_inode_lookup(). This has caused issues
@@ -112,6 +155,7 @@ void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
+ * @skip_freeing: set this not return an inode if it is currently being freed.
*
* Returns: A VFS inode, or an error
*/
@@ -119,13 +163,19 @@ void gfs2_set_iop(struct inode *inode)
struct inode *gfs2_inode_lookup(struct super_block *sb,
unsigned int type,
u64 no_addr,
- u64 no_formal_ino)
+ u64 no_formal_ino, int skip_freeing)
{
- struct inode *inode = gfs2_iget(sb, no_addr);
- struct gfs2_inode *ip = GFS2_I(inode);
+ struct inode *inode;
+ struct gfs2_inode *ip;
struct gfs2_glock *io_gl;
int error;
+ if (skip_freeing)
+ inode = gfs2_iget_skip(sb, no_addr);
+ else
+ inode = gfs2_iget(sb, no_addr);
+ ip = GFS2_I(inode);
+
if (!inode)
return ERR_PTR(-ENOBUFS);
@@ -244,6 +294,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
return 0;
}
+static void gfs2_inode_bh(struct gfs2_inode *ip, struct buffer_head *bh)
+{
+ ip->i_cache[0] = bh;
+}
+
/**
* gfs2_inode_refresh - Refresh the incore copy of the dinode
* @ip: The GFS2 inode
@@ -688,7 +743,7 @@ out:
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
const struct gfs2_inum_host *inum, unsigned int mode,
unsigned int uid, unsigned int gid,
- const u64 *generation, dev_t dev)
+ const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_dinode *di;
@@ -743,13 +798,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
memset(&di->di_reserved, 0, sizeof(di->di_reserved));
+
+ set_buffer_uptodate(dibh);
- brelse(dibh);
+ *bhp = dibh;
}
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
unsigned int mode, const struct gfs2_inum_host *inum,
- const u64 *generation, dev_t dev)
+ const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
unsigned int uid, gid;
@@ -770,7 +827,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
if (error)
goto out_quota;
- init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
+ init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
gfs2_quota_change(dip, +1, uid, gid);
gfs2_trans_end(sdp);
@@ -909,6 +966,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
int error;
u64 generation;
+ struct buffer_head *bh = NULL;
if (!name->len || name->len > GFS2_FNAMESIZE)
return ERR_PTR(-ENAMETOOLONG);
@@ -935,16 +993,18 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error)
goto fail_gunlock;
- error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
+ error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
if (error)
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
inum.no_addr,
- inum.no_formal_ino);
+ inum.no_formal_ino, 0);
if (IS_ERR(inode))
goto fail_gunlock2;
+ gfs2_inode_bh(GFS2_I(inode), bh);
+
error = gfs2_inode_refresh(GFS2_I(inode));
if (error)
goto fail_gunlock2;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 4517ac82c01..351ac87ab38 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -49,7 +49,8 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_set_iop(struct inode *inode);
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ int skip_freeing);
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 24d70f73b65..9e8265d2837 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/list.h>
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
index fba1f1d87e4..1f7b038530b 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -346,15 +346,16 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
+ unsigned int mask = 0;
+
poll_wait(file, &send_wq, wait);
spin_lock(&ops_lock);
- if (!list_empty(&send_list)) {
- spin_unlock(&ops_lock);
- return POLLIN | POLLRDNORM;
- }
+ if (!list_empty(&send_list))
+ mask = POLLIN | POLLRDNORM;
spin_unlock(&ops_lock);
- return 0;
+
+ return mask;
}
static const struct file_operations dev_fops = {
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index d9fe3ca40e1..ae9e6a25fe2 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -190,7 +190,6 @@ static struct kobj_type gdlm_ktype = {
};
static struct kset gdlm_kset = {
- .kobj = {.name = "lock_dlm",},
.ktype = &gdlm_ktype,
};
@@ -224,6 +223,7 @@ int gdlm_sysfs_init(void)
{
int error;
+ kobject_set_name(&gdlm_kset.kobj, "lock_dlm");
kobj_set_kset_s(&gdlm_kset, kernel_subsys);
error = kset_register(&gdlm_kset);
if (error)
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index 1aca51e4509..bd938f06481 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -268,20 +268,16 @@ static inline int check_drop(struct gdlm_ls *ls)
return 0;
}
-static int gdlm_thread(void *data)
+static int gdlm_thread(void *data, int blist)
{
struct gdlm_ls *ls = (struct gdlm_ls *) data;
struct gdlm_lock *lp = NULL;
- int blist = 0;
uint8_t complete, blocking, submit, drop;
DECLARE_WAITQUEUE(wait, current);
/* Only thread1 is allowed to do blocking callbacks since gfs
may wait for a completion callback within a blocking cb. */
- if (current == ls->thread1)
- blist = 1;
-
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ls->thread_wait, &wait);
@@ -333,12 +329,22 @@ static int gdlm_thread(void *data)
return 0;
}
+static int gdlm_thread1(void *data)
+{
+ return gdlm_thread(data, 1);
+}
+
+static int gdlm_thread2(void *data)
+{
+ return gdlm_thread(data, 0);
+}
+
int gdlm_init_threads(struct gdlm_ls *ls)
{
struct task_struct *p;
int error;
- p = kthread_run(gdlm_thread, ls, "lock_dlm1");
+ p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm1 thread %d", error);
@@ -346,7 +352,7 @@ int gdlm_init_threads(struct gdlm_ls *ls)
}
ls->thread1 = p;
- p = kthread_run(gdlm_thread, ls, "lock_dlm2");
+ p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm2 thread %d", error);
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
index 0d149c8c493..d3b8ce6fbbe 100644
--- a/fs/gfs2/locking/nolock/main.c
+++ b/fs/gfs2/locking/nolock/main.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f49a12e2408..7df70247325 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -60,6 +60,26 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
}
/**
+ * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
+ * @mapping: The associated mapping (may be NULL)
+ * @bd: The gfs2_bufdata to remove
+ *
+ * The log lock _must_ be held when calling this function
+ *
+ */
+
+void gfs2_remove_from_ail(struct address_space *mapping, struct gfs2_bufdata *bd)
+{
+ bd->bd_ail = NULL;
+ list_del_init(&bd->bd_ail_st_list);
+ list_del_init(&bd->bd_ail_gl_list);
+ atomic_dec(&bd->bd_gl->gl_ail_count);
+ if (mapping)
+ gfs2_meta_cache_flush(GFS2_I(mapping->host));
+ brelse(bd->bd_bh);
+}
+
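A minimal caller sketch for the helper above, assuming a bufdata that currently sits on an AIL list; per the comment, the log lock must bracket the call (this is the pattern gfs2_ail2_empty_one() follows later in this patch):

	gfs2_log_lock(sdp);
	gfs2_remove_from_ail(bd->bd_bh->b_page->mapping, bd);
	gfs2_log_unlock(sdp);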
+/**
* gfs2_ail1_start_one - Start I/O on a part of the AIL
* @sdp: the filesystem
* @tr: the part of the AIL
@@ -83,17 +103,9 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
gfs2_assert(sdp, bd->bd_ail == ai);
- if (!bh){
- list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
- continue;
- }
-
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh)) {
- gfs2_log_unlock(sdp);
+ if (!buffer_uptodate(bh))
gfs2_io_error_bh(sdp, bh);
- gfs2_log_lock(sdp);
- }
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
continue;
}
@@ -103,9 +115,16 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+ get_bh(bh);
gfs2_log_unlock(sdp);
- wait_on_buffer(bh);
- ll_rw_block(WRITE, 1, &bh);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
+ bh->b_end_io = end_buffer_write_sync;
+ submit_bh(WRITE, bh);
+ } else {
+ unlock_buffer(bh);
+ brelse(bh);
+ }
gfs2_log_lock(sdp);
retry = 1;
@@ -130,11 +149,6 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
bd_ail_st_list) {
bh = bd->bd_bh;
- if (!bh){
- list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
- continue;
- }
-
gfs2_assert(sdp, bd->bd_ail == ai);
if (buffer_busy(bh)) {
@@ -155,13 +169,14 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
- struct list_head *head = &sdp->sd_ail1_list;
+ struct list_head *head;
u64 sync_gen;
struct list_head *first;
struct gfs2_ail *first_ai, *ai, *tmp;
int done = 0;
gfs2_log_lock(sdp);
+ head = &sdp->sd_ail1_list;
if (list_empty(head)) {
gfs2_log_unlock(sdp);
return;
@@ -233,11 +248,7 @@ static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
bd = list_entry(head->prev, struct gfs2_bufdata,
bd_ail_st_list);
gfs2_assert(sdp, bd->bd_ail == ai);
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&bd->bd_gl->gl_ail_count);
- brelse(bd->bd_bh);
+ gfs2_remove_from_ail(bd->bd_bh->b_page->mapping, bd);
}
}
@@ -439,10 +450,10 @@ static unsigned int current_tail(struct gfs2_sbd *sdp)
return tail;
}
-static inline void log_incr_head(struct gfs2_sbd *sdp)
+void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
if (sdp->sd_log_flush_head == sdp->sd_log_tail)
- gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);
+ BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);
if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
sdp->sd_log_flush_head = 0;
@@ -451,6 +462,23 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
}
/**
+ * gfs2_log_write_endio - End of I/O for a log buffer
+ * @bh: The buffer head
+ * @uptodate: I/O Status
+ *
+ */
+
+static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
+{
+ struct gfs2_sbd *sdp = bh->b_private;
+ bh->b_private = NULL;
+
+ end_buffer_write_sync(bh, uptodate);
+ if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+ wake_up(&sdp->sd_log_flush_wait);
+}
+
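The in-flight accounting that this completion handler relies on pairs up as follows, a sketch of the flow this patch sets up (submission side in gfs2_log_get_buf()/gfs2_log_fake_buf(), wait side in log_flush_commit()):

	atomic_inc(&sdp->sd_log_in_flight);	/* when a log buffer is created */
	submit_bh(WRITE, bh);			/* I/O issued by the commit code */
	/* ... on completion, gfs2_log_write_endio() runs ... */
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);	/* lets log_flush_commit() proceed */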
+/**
* gfs2_log_get_buf - Get and initialize a buffer to use for log control data
* @sdp: The GFS2 superblock
*
@@ -460,25 +488,43 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
- struct gfs2_log_buf *lb;
struct buffer_head *bh;
- lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
- list_add(&lb->lb_list, &sdp->sd_log_flush_list);
-
- bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
+ bh = sb_getblk(sdp->sd_vfs, blkno);
lock_buffer(bh);
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
- unlock_buffer(bh);
-
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
+ atomic_inc(&sdp->sd_log_in_flight);
+ bh->b_private = sdp;
+ bh->b_end_io = gfs2_log_write_endio;
return bh;
}
/**
+ * gfs2_fake_write_endio - End of I/O for a fake buffer head used for log writes
+ * @bh: The buffer head
+ * @uptodate: The I/O Status
+ *
+ */
+
+static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
+{
+ struct buffer_head *real_bh = bh->b_private;
+ struct gfs2_bufdata *bd = real_bh->b_private;
+ struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;
+
+ end_buffer_write_sync(bh, uptodate);
+ free_buffer_head(bh);
+ unlock_buffer(real_bh);
+ brelse(real_bh);
+ if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+ wake_up(&sdp->sd_log_flush_wait);
+}
+
+/**
* gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
* @sdp: the filesystem
* @data: the data the buffer_head should point to
@@ -490,22 +536,20 @@ struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
struct buffer_head *real)
{
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
- struct gfs2_log_buf *lb;
struct buffer_head *bh;
- lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
- list_add(&lb->lb_list, &sdp->sd_log_flush_list);
- lb->lb_real = real;
-
- bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
+ bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
atomic_set(&bh->b_count, 1);
- bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
set_bh_page(bh, real->b_page, bh_offset(real));
bh->b_blocknr = blkno;
bh->b_size = sdp->sd_sb.sb_bsize;
bh->b_bdev = sdp->sd_vfs->s_bdev;
+ bh->b_private = real;
+ bh->b_end_io = gfs2_fake_write_endio;
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
+ atomic_inc(&sdp->sd_log_in_flight);
return bh;
}
@@ -572,45 +616,75 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
gfs2_assert_withdraw(sdp, !pull);
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
}
static void log_flush_commit(struct gfs2_sbd *sdp)
{
- struct list_head *head = &sdp->sd_log_flush_list;
- struct gfs2_log_buf *lb;
- struct buffer_head *bh;
- int flushcount = 0;
+ DEFINE_WAIT(wait);
+
+ if (atomic_read(&sdp->sd_log_in_flight)) {
+ do {
+ prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&sdp->sd_log_in_flight))
+ io_schedule();
+ } while(atomic_read(&sdp->sd_log_in_flight));
+ finish_wait(&sdp->sd_log_flush_wait, &wait);
+ }
- while (!list_empty(head)) {
- lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
- list_del(&lb->lb_list);
- bh = lb->lb_bh;
+ log_write_header(sdp, 0, 0);
+}
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
- if (lb->lb_real) {
- while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
- schedule();
- free_buffer_head(bh);
- } else
+static void gfs2_ordered_write(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd;
+ struct buffer_head *bh;
+ LIST_HEAD(written);
+
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_ordered)) {
+ bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
+ list_move(&bd->bd_le.le_list, &written);
+ bh = bd->bd_bh;
+ if (!buffer_dirty(bh))
+ continue;
+ get_bh(bh);
+ gfs2_log_unlock(sdp);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
+ bh->b_end_io = end_buffer_write_sync;
+ submit_bh(WRITE, bh);
+ } else {
+ unlock_buffer(bh);
brelse(bh);
- kfree(lb);
- flushcount++;
+ }
+ gfs2_log_lock(sdp);
}
+ list_splice(&written, &sdp->sd_log_le_ordered);
+ gfs2_log_unlock(sdp);
+}
- /* If nothing was journaled, the header is unplanned and unwanted. */
- if (flushcount) {
- log_write_header(sdp, 0, 0);
- } else {
- unsigned int tail;
- tail = current_tail(sdp);
+static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd;
+ struct buffer_head *bh;
- gfs2_ail1_empty(sdp, 0);
- if (sdp->sd_log_tail != tail)
- log_pull_tail(sdp, tail);
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_ordered)) {
+ bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
+ bh = bd->bd_bh;
+ if (buffer_locked(bh)) {
+ get_bh(bh);
+ gfs2_log_unlock(sdp);
+ wait_on_buffer(bh);
+ brelse(bh);
+ gfs2_log_lock(sdp);
+ continue;
+ }
+ list_del_init(&bd->bd_le.le_list);
}
+ gfs2_log_unlock(sdp);
}
/**
@@ -640,10 +714,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
INIT_LIST_HEAD(&ai->ai_ail1_list);
INIT_LIST_HEAD(&ai->ai_ail2_list);
- gfs2_assert_withdraw(sdp,
- sdp->sd_log_num_buf + sdp->sd_log_num_jdata ==
- sdp->sd_log_commited_buf +
- sdp->sd_log_commited_databuf);
+ if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
+ printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
+ sdp->sd_log_commited_buf);
+ gfs2_assert_withdraw(sdp, 0);
+ }
+ if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
+ printk(KERN_INFO "GFS2: log databuf %u %u\n",
+ sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
+ gfs2_assert_withdraw(sdp, 0);
+ }
gfs2_assert_withdraw(sdp,
sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
@@ -651,8 +731,11 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
sdp->sd_log_flush_wrapped = 0;
ai->ai_first = sdp->sd_log_flush_head;
+ gfs2_ordered_write(sdp);
lops_before_commit(sdp);
- if (!list_empty(&sdp->sd_log_flush_list))
+ gfs2_ordered_wait(sdp);
+
+ if (sdp->sd_log_head != sdp->sd_log_flush_head)
log_flush_commit(sdp);
else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
gfs2_log_lock(sdp);
@@ -744,7 +827,6 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
- gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 8e7aa0f2910..dae28240062 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -52,12 +52,14 @@ int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
struct buffer_head *real);
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+void gfs2_remove_from_ail(struct address_space *mapping, struct gfs2_bufdata *bd);
void gfs2_log_shutdown(struct gfs2_sbd *sdp);
void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 3b395c41b2f..6c27cea761c 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -27,7 +27,104 @@
#include "trans.h"
#include "util.h"
-static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+/**
+ * gfs2_pin - Pin a buffer in memory
+ * @sdp: The superblock
+ * @bh: The buffer to be pinned
+ *
+ * The log lock must be held when calling this function
+ */
+static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
+{
+ struct gfs2_bufdata *bd;
+
+ gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
+
+ clear_buffer_dirty(bh);
+ if (test_set_buffer_pinned(bh))
+ gfs2_assert_withdraw(sdp, 0);
+ if (!buffer_uptodate(bh))
+ gfs2_io_error_bh(sdp, bh);
+ bd = bh->b_private;
+ /* If this buffer is in the AIL and it has already been written
+ * to its in-place disk block, remove it from the AIL.
+ */
+ if (bd->bd_ail)
+ list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
+ get_bh(bh);
+}
+
+/**
+ * gfs2_unpin - Unpin a buffer
+ * @sdp: the filesystem the buffer belongs to
+ * @bh: The buffer to unpin
+ * @ai: The AIL list to add the buffer to
+ *
+ */
+
+static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ struct gfs2_ail *ai)
+{
+ struct gfs2_bufdata *bd = bh->b_private;
+
+ gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
+
+ if (!buffer_pinned(bh))
+ gfs2_assert_withdraw(sdp, 0);
+
+ lock_buffer(bh);
+ mark_buffer_dirty(bh);
+ clear_buffer_pinned(bh);
+
+ gfs2_log_lock(sdp);
+ if (bd->bd_ail) {
+ list_del(&bd->bd_ail_st_list);
+ brelse(bh);
+ } else {
+ struct gfs2_glock *gl = bd->bd_gl;
+ list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
+ atomic_inc(&gl->gl_ail_count);
+ }
+ bd->bd_ail = ai;
+ list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bh);
+}
+
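Taken together, pin and unpin bracket the life of a journaled buffer; a minimal sketch of the sequence as this patch uses it (pin while the transaction is built, unpin once the log write has committed):

	gfs2_log_lock(sdp);
	gfs2_pin(sdp, bd->bd_bh);		/* in buf_lo_add()/databuf_lo_add(), log lock held */
	gfs2_log_unlock(sdp);
	/* ... descriptor and data blocks written to the journal ... */
	gfs2_unpin(sdp, bd->bd_bh, ai);		/* in *_lo_after_commit(): buffer joins ai->ai_ail1_list */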
+
+static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
+{
+ return (struct gfs2_log_descriptor *)bh->b_data;
+}
+
+static inline __be64 *bh_log_ptr(struct buffer_head *bh)
+{
+ struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+ return (__force __be64 *)(ld + 1);
+}
+
+static inline __be64 *bh_ptr_end(struct buffer_head *bh)
+{
+ return (__force __be64 *)(bh->b_data + bh->b_size);
+}
+
+
+static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+{
+ struct buffer_head *bh = gfs2_log_get_buf(sdp);
+ struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+ ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+ ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
+ ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
+ ld->ld_type = cpu_to_be32(ld_type);
+ ld->ld_length = 0;
+ ld->ld_data1 = 0;
+ ld->ld_data2 = 0;
+ memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
+ return bh;
+}
+
+static void __glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
struct gfs2_glock *gl;
struct gfs2_trans *tr = current->journal_info;
@@ -38,15 +135,19 @@ static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
return;
- gfs2_log_lock(sdp);
- if (!list_empty(&le->le_list)){
- gfs2_log_unlock(sdp);
+ if (!list_empty(&le->le_list))
return;
- }
+
gfs2_glock_hold(gl);
set_bit(GLF_DIRTY, &gl->gl_flags);
sdp->sd_log_num_gl++;
list_add(&le->le_list, &sdp->sd_log_le_gl);
+}
+
+static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+{
+ gfs2_log_lock(sdp);
+ __glock_lo_add(sdp, le);
gfs2_log_unlock(sdp);
}
@@ -71,30 +172,25 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
struct gfs2_trans *tr;
+ lock_buffer(bd->bd_bh);
gfs2_log_lock(sdp);
- if (!list_empty(&bd->bd_list_tr)) {
- gfs2_log_unlock(sdp);
- return;
- }
+ if (!list_empty(&bd->bd_list_tr))
+ goto out;
tr = current->journal_info;
tr->tr_touched = 1;
tr->tr_num_buf++;
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
- gfs2_log_unlock(sdp);
-
if (!list_empty(&le->le_list))
- return;
-
- gfs2_trans_add_gl(bd->bd_gl);
-
+ goto out;
+ __glock_lo_add(sdp, &bd->bd_gl->gl_le);
gfs2_meta_check(sdp, bd->bd_bh);
gfs2_pin(sdp, bd->bd_bh);
- gfs2_log_lock(sdp);
sdp->sd_log_num_buf++;
list_add(&le->le_list, &sdp->sd_log_le_buf);
- gfs2_log_unlock(sdp);
-
tr->tr_num_buf_new++;
+out:
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bd->bd_bh);
}
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -117,8 +213,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
struct buffer_head *bh;
struct gfs2_log_descriptor *ld;
struct gfs2_bufdata *bd1 = NULL, *bd2;
- unsigned int total = sdp->sd_log_num_buf;
- unsigned int offset = BUF_OFFSET;
+ unsigned int total;
unsigned int limit;
unsigned int num;
unsigned n;
@@ -127,22 +222,20 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
limit = buf_limit(sdp);
/* for 4k blocks, limit = 503 */
+ gfs2_log_lock(sdp);
+ total = sdp->sd_log_num_buf;
bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
while(total) {
num = total;
if (total > limit)
num = limit;
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)bh->b_data;
- ptr = (__be64 *)(bh->b_data + offset);
- ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
+ gfs2_log_unlock(sdp);
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+ gfs2_log_lock(sdp);
+ ld = bh_log_desc(bh);
+ ptr = bh_log_ptr(bh);
ld->ld_length = cpu_to_be32(num + 1);
ld->ld_data1 = cpu_to_be32(num);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
n = 0;
list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
@@ -152,21 +245,27 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
break;
}
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ gfs2_log_unlock(sdp);
+ submit_bh(WRITE, bh);
+ gfs2_log_lock(sdp);
n = 0;
list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
bd_le.le_list) {
+ get_bh(bd2->bd_bh);
+ gfs2_log_unlock(sdp);
+ lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
+ gfs2_log_lock(sdp);
if (++n >= num)
break;
}
+ BUG_ON(total < num);
total -= num;
}
+ gfs2_log_unlock(sdp);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
@@ -270,11 +369,8 @@ static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
tr = current->journal_info;
tr->tr_touched = 1;
tr->tr_num_revoke++;
-
- gfs2_log_lock(sdp);
sdp->sd_log_num_revoke++;
list_add(&le->le_list, &sdp->sd_log_le_revoke);
- gfs2_log_unlock(sdp);
}
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
@@ -284,32 +380,25 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
struct buffer_head *bh;
unsigned int offset;
struct list_head *head = &sdp->sd_log_le_revoke;
- struct gfs2_revoke *rv;
+ struct gfs2_bufdata *bd;
if (!sdp->sd_log_num_revoke)
return;
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)bh->b_data;
- ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
+ ld = bh_log_desc(bh);
ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
sizeof(u64)));
ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
offset = sizeof(struct gfs2_log_descriptor);
while (!list_empty(head)) {
- rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
- list_del_init(&rv->rv_le.le_list);
+ bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
+ list_del_init(&bd->bd_le.le_list);
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data;
@@ -319,15 +408,14 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
offset = sizeof(struct gfs2_meta_header);
}
- *(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
- kfree(rv);
+ *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
offset += sizeof(u64);
}
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -466,222 +554,136 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
struct address_space *mapping = bd->bd_bh->b_page->mapping;
struct gfs2_inode *ip = GFS2_I(mapping->host);
+ lock_buffer(bd->bd_bh);
gfs2_log_lock(sdp);
- if (!list_empty(&bd->bd_list_tr)) {
- gfs2_log_unlock(sdp);
- return;
- }
+ if (!list_empty(&bd->bd_list_tr))
+ goto out;
tr->tr_touched = 1;
if (gfs2_is_jdata(ip)) {
tr->tr_num_buf++;
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
}
- gfs2_log_unlock(sdp);
if (!list_empty(&le->le_list))
- return;
+ goto out;
- gfs2_trans_add_gl(bd->bd_gl);
+ __glock_lo_add(sdp, &bd->bd_gl->gl_le);
if (gfs2_is_jdata(ip)) {
- sdp->sd_log_num_jdata++;
gfs2_pin(sdp, bd->bd_bh);
tr->tr_num_databuf_new++;
+ sdp->sd_log_num_databuf++;
+ list_add(&le->le_list, &sdp->sd_log_le_databuf);
+ } else {
+ list_add(&le->le_list, &sdp->sd_log_le_ordered);
}
- gfs2_log_lock(sdp);
- sdp->sd_log_num_databuf++;
- list_add(&le->le_list, &sdp->sd_log_le_databuf);
+out:
gfs2_log_unlock(sdp);
+ unlock_buffer(bd->bd_bh);
}
-static int gfs2_check_magic(struct buffer_head *bh)
+static void gfs2_check_magic(struct buffer_head *bh)
{
- struct page *page = bh->b_page;
void *kaddr;
__be32 *ptr;
- int rv = 0;
- kaddr = kmap_atomic(page, KM_USER0);
+ clear_buffer_escaped(bh);
+ kaddr = kmap_atomic(bh->b_page, KM_USER0);
ptr = kaddr + bh_offset(bh);
if (*ptr == cpu_to_be32(GFS2_MAGIC))
- rv = 1;
+ set_buffer_escaped(bh);
kunmap_atomic(kaddr, KM_USER0);
-
- return rv;
}
-/**
- * databuf_lo_before_commit - Scan the data buffers, writing as we go
- *
- * Here we scan through the lists of buffers and make the assumption
- * that any buffer thats been pinned is being journaled, and that
- * any unpinned buffer is an ordered write data buffer and therefore
- * will be written back rather than journaled.
- */
-static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ struct list_head *list, struct list_head *done,
+ unsigned int n)
{
- LIST_HEAD(started);
- struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
- struct buffer_head *bh = NULL,*bh1 = NULL;
+ struct buffer_head *bh1;
struct gfs2_log_descriptor *ld;
- unsigned int limit;
- unsigned int total_dbuf;
- unsigned int total_jdata = sdp->sd_log_num_jdata;
- unsigned int num, n;
- __be64 *ptr = NULL;
+ struct gfs2_bufdata *bd;
+ __be64 *ptr;
+
+ if (!bh)
+ return;
- limit = databuf_limit(sdp);
+ ld = bh_log_desc(bh);
+ ld->ld_length = cpu_to_be32(n + 1);
+ ld->ld_data1 = cpu_to_be32(n);
- /*
- * Start writing ordered buffers, write journaled buffers
- * into the log along with a header
- */
+ ptr = bh_log_ptr(bh);
+
+ get_bh(bh);
+ submit_bh(WRITE, bh);
gfs2_log_lock(sdp);
- total_dbuf = sdp->sd_log_num_databuf;
- bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
- bd_le.le_list);
- while(total_dbuf) {
- num = total_jdata;
- if (num > limit)
- num = limit;
- n = 0;
- list_for_each_entry_safe_continue(bd1, bdt,
- &sdp->sd_log_le_databuf,
- bd_le.le_list) {
- /* store off the buffer head in a local ptr since
- * gfs2_bufdata might change when we drop the log lock
- */
- bh1 = bd1->bd_bh;
-
- /* An ordered write buffer */
- if (bh1 && !buffer_pinned(bh1)) {
- list_move(&bd1->bd_le.le_list, &started);
- if (bd1 == bd2) {
- bd2 = NULL;
- bd2 = list_prepare_entry(bd2,
- &sdp->sd_log_le_databuf,
- bd_le.le_list);
- }
- total_dbuf--;
- if (bh1) {
- if (buffer_dirty(bh1)) {
- get_bh(bh1);
-
- gfs2_log_unlock(sdp);
-
- ll_rw_block(SWRITE, 1, &bh1);
- brelse(bh1);
-
- gfs2_log_lock(sdp);
- }
- continue;
- }
- continue;
- } else if (bh1) { /* A journaled buffer */
- int magic;
- gfs2_log_unlock(sdp);
- if (!bh) {
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)
- bh->b_data;
- ptr = (__be64 *)(bh->b_data +
- DATABUF_OFFSET);
- ld->ld_header.mh_magic =
- cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type =
- cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format =
- cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type =
- cpu_to_be32(GFS2_LOG_DESC_JDATA);
- ld->ld_length = cpu_to_be32(num + 1);
- ld->ld_data1 = cpu_to_be32(num);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
- }
- magic = gfs2_check_magic(bh1);
- *ptr++ = cpu_to_be64(bh1->b_blocknr);
- *ptr++ = cpu_to_be64((__u64)magic);
- clear_buffer_escaped(bh1);
- if (unlikely(magic != 0))
- set_buffer_escaped(bh1);
- gfs2_log_lock(sdp);
- if (++n >= num)
- break;
- } else if (!bh1) {
- total_dbuf--;
- sdp->sd_log_num_databuf--;
- list_del_init(&bd1->bd_le.le_list);
- if (bd1 == bd2) {
- bd2 = NULL;
- bd2 = list_prepare_entry(bd2,
- &sdp->sd_log_le_databuf,
- bd_le.le_list);
- }
- kmem_cache_free(gfs2_bufdata_cachep, bd1);
- }
+ while(!list_empty(list)) {
+ bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
+ list_move_tail(&bd->bd_le.le_list, done);
+ get_bh(bd->bd_bh);
+ while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
+ gfs2_log_incr_head(sdp);
+ ptr += 2;
}
gfs2_log_unlock(sdp);
- if (bh) {
- set_buffer_mapped(bh);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- bh = NULL;
+ lock_buffer(bd->bd_bh);
+ if (buffer_escaped(bd->bd_bh)) {
+ void *kaddr;
+ bh1 = gfs2_log_get_buf(sdp);
+ kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+ memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
+ bh1->b_size);
+ kunmap_atomic(kaddr, KM_USER0);
+ *(__be32 *)bh1->b_data = 0;
+ clear_buffer_escaped(bd->bd_bh);
+ unlock_buffer(bd->bd_bh);
+ brelse(bd->bd_bh);
+ } else {
+ bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
}
- n = 0;
+ submit_bh(WRITE, bh1);
gfs2_log_lock(sdp);
- list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
- bd_le.le_list) {
- if (!bd2->bd_bh)
- continue;
- /* copy buffer if it needs escaping */
- gfs2_log_unlock(sdp);
- if (unlikely(buffer_escaped(bd2->bd_bh))) {
- void *kaddr;
- struct page *page = bd2->bd_bh->b_page;
- bh = gfs2_log_get_buf(sdp);
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(bh->b_data,
- kaddr + bh_offset(bd2->bd_bh),
- sdp->sd_sb.sb_bsize);
- kunmap_atomic(kaddr, KM_USER0);
- *(__be32 *)bh->b_data = 0;
- } else {
- bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- }
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- gfs2_log_lock(sdp);
- if (++n >= num)
- break;
- }
- bh = NULL;
- BUG_ON(total_dbuf < num);
- total_dbuf -= num;
- total_jdata -= num;
+ ptr += 2;
}
gfs2_log_unlock(sdp);
+ brelse(bh);
+}
- /* Wait on all ordered buffers */
- while (!list_empty(&started)) {
- gfs2_log_lock(sdp);
- bd1 = list_entry(started.next, struct gfs2_bufdata,
- bd_le.le_list);
- list_del_init(&bd1->bd_le.le_list);
- sdp->sd_log_num_databuf--;
- bh = bd1->bd_bh;
- if (bh) {
- bh->b_private = NULL;
- get_bh(bh);
- gfs2_log_unlock(sdp);
- wait_on_buffer(bh);
- brelse(bh);
- } else
- gfs2_log_unlock(sdp);
+/**
+ * databuf_lo_before_commit - Scan the data buffers, writing as we go
+ *
+ */
- kmem_cache_free(gfs2_bufdata_cachep, bd1);
- }
+static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd = NULL;
+ struct buffer_head *bh = NULL;
+ unsigned int n = 0;
+ __be64 *ptr = NULL, *end = NULL;
+ LIST_HEAD(processed);
+ LIST_HEAD(in_progress);
- /* We've removed all the ordered write bufs here, so only jdata left */
- gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_databuf)) {
+ if (ptr == end) {
+ gfs2_log_unlock(sdp);
+ gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+ n = 0;
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
+ ptr = bh_log_ptr(bh);
+ end = bh_ptr_end(bh) - 1;
+ gfs2_log_lock(sdp);
+ continue;
+ }
+ bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
+ list_move_tail(&bd->bd_le.le_list, &in_progress);
+ gfs2_check_magic(bd->bd_bh);
+ *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
+ *ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
+ n++;
+ }
+ gfs2_log_unlock(sdp);
+ gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+ gfs2_log_lock(sdp);
+ list_splice(&processed, &sdp->sd_log_le_databuf);
+ gfs2_log_unlock(sdp);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
@@ -765,11 +767,9 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
list_del_init(&bd->bd_le.le_list);
sdp->sd_log_num_databuf--;
- sdp->sd_log_num_jdata--;
gfs2_unpin(sdp, bd->bd_bh, ai);
}
gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
- gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}
@@ -817,10 +817,10 @@ const struct gfs2_log_operations gfs2_databuf_lops = {
const struct gfs2_log_operations *gfs2_log_ops[] = {
&gfs2_glock_lops,
+ &gfs2_databuf_lops,
&gfs2_buf_lops,
- &gfs2_revoke_lops,
&gfs2_rg_lops,
- &gfs2_databuf_lops,
+ &gfs2_revoke_lops,
NULL,
};
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index d5d4e68b880..79c91fd8381 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -107,6 +107,8 @@ static int __init init_gfs2_fs(void)
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
+ gfs2_glock_exit();
+
if (gfs2_bufdata_cachep)
kmem_cache_destroy(gfs2_bufdata_cachep);
@@ -127,6 +129,7 @@ fail:
static void __exit exit_gfs2_fs(void)
{
+ gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 8da343b34ae..4da423985e4 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -297,74 +297,35 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
unlock_page(bh->b_page);
}
-/**
- * gfs2_pin - Pin a buffer in memory
- * @sdp: the filesystem the buffer belongs to
- * @bh: The buffer to be pinned
- *
- */
-
-void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
+ struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
struct gfs2_bufdata *bd = bh->b_private;
-
- gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
-
- if (test_set_buffer_pinned(bh))
- gfs2_assert_withdraw(sdp, 0);
-
- wait_on_buffer(bh);
-
- /* If this buffer is in the AIL and it has already been written
- to in-place disk block, remove it from the AIL. */
-
- gfs2_log_lock(sdp);
- if (bd->bd_ail && !buffer_in_io(bh))
- list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
- gfs2_log_unlock(sdp);
-
- clear_buffer_dirty(bh);
- wait_on_buffer(bh);
-
- if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
-
- get_bh(bh);
-}
-
-/**
- * gfs2_unpin - Unpin a buffer
- * @sdp: the filesystem the buffer belongs to
- * @bh: The buffer to unpin
- * @ai:
- *
- */
-
-void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
- struct gfs2_ail *ai)
-{
- struct gfs2_bufdata *bd = bh->b_private;
-
- gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
-
- if (!buffer_pinned(bh))
- gfs2_assert_withdraw(sdp, 0);
-
- mark_buffer_dirty(bh);
- clear_buffer_pinned(bh);
-
- gfs2_log_lock(sdp);
- if (bd->bd_ail) {
- list_del(&bd->bd_ail_st_list);
+ if (test_clear_buffer_pinned(bh)) {
+ list_del_init(&bd->bd_le.le_list);
+ if (meta) {
+ gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
+ sdp->sd_log_num_buf--;
+ tr->tr_num_buf_rm++;
+ } else {
+ gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
+ sdp->sd_log_num_databuf--;
+ tr->tr_num_databuf_rm++;
+ }
+ tr->tr_touched = 1;
brelse(bh);
- } else {
- struct gfs2_glock *gl = bd->bd_gl;
- list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
- atomic_inc(&gl->gl_ail_count);
}
- bd->bd_ail = ai;
- list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
- gfs2_log_unlock(sdp);
+ if (bd) {
+ if (bd->bd_ail) {
+ gfs2_remove_from_ail(NULL, bd);
+ bh->b_private = NULL;
+ bd->bd_bh = NULL;
+ bd->bd_blkno = bh->b_blocknr;
+ gfs2_trans_add_revoke(sdp, bd);
+ }
+ }
+ clear_buffer_dirty(bh);
+ clear_buffer_uptodate(bh);
}
/**
@@ -383,44 +344,11 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
while (blen) {
bh = getbuf(ip->i_gl, bstart, NO_CREATE);
if (bh) {
- struct gfs2_bufdata *bd = bh->b_private;
-
- if (test_clear_buffer_pinned(bh)) {
- struct gfs2_trans *tr = current->journal_info;
- struct gfs2_inode *bh_ip =
- GFS2_I(bh->b_page->mapping->host);
-
- gfs2_log_lock(sdp);
- list_del_init(&bd->bd_le.le_list);
- gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
- sdp->sd_log_num_buf--;
- gfs2_log_unlock(sdp);
- if (bh_ip->i_inode.i_private != NULL)
- tr->tr_num_databuf_rm++;
- else
- tr->tr_num_buf_rm++;
- brelse(bh);
- }
- if (bd) {
- gfs2_log_lock(sdp);
- if (bd->bd_ail) {
- u64 blkno = bh->b_blocknr;
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&bd->bd_gl->gl_ail_count);
- brelse(bh);
- gfs2_log_unlock(sdp);
- gfs2_trans_add_revoke(sdp, blkno);
- } else
- gfs2_log_unlock(sdp);
- }
-
lock_buffer(bh);
- clear_buffer_dirty(bh);
- clear_buffer_uptodate(bh);
+ gfs2_log_lock(sdp);
+ gfs2_remove_from_journal(bh, current->journal_info, 1);
+ gfs2_log_unlock(sdp);
unlock_buffer(bh);
-
brelse(bh);
}
@@ -446,10 +374,10 @@ void gfs2_meta_cache_flush(struct gfs2_inode *ip)
for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
bh_slot = &ip->i_cache[x];
- if (!*bh_slot)
- break;
- brelse(*bh_slot);
- *bh_slot = NULL;
+ if (*bh_slot) {
+ brelse(*bh_slot);
+ *bh_slot = NULL;
+ }
}
spin_unlock(&ip->i_spin);
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 527bf19d969..b7048222ebb 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -50,9 +50,9 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
int meta);
-void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
- struct gfs2_ail *ai);
+
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
+ int meta);
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c
index 4864659555d..b941f9f9f95 100644
--- a/fs/gfs2/mount.c
+++ b/fs/gfs2/mount.c
@@ -42,6 +42,7 @@ enum {
Opt_nosuiddir,
Opt_data_writeback,
Opt_data_ordered,
+ Opt_err,
};
static match_table_t tokens = {
@@ -64,7 +65,8 @@ static match_table_t tokens = {
{Opt_suiddir, "suiddir"},
{Opt_nosuiddir, "nosuiddir"},
{Opt_data_writeback, "data=writeback"},
- {Opt_data_ordered, "data=ordered"}
+ {Opt_data_ordered, "data=ordered"},
+ {Opt_err, NULL}
};
/**
@@ -237,6 +239,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
case Opt_data_ordered:
args->ar_data = GFS2_DATA_ORDERED;
break;
+ case Opt_err:
default:
fs_info(sdp, "unknown option: %s\n", o);
error = -EINVAL;
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 42a5f58f6fc..873a511ef2b 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -90,7 +90,7 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
error = gfs2_block_map(inode, lblock, 0, bh_result);
if (error)
return error;
- if (bh_result->b_blocknr == 0)
+ if (!buffer_mapped(bh_result))
return -EIO;
return 0;
}
@@ -414,7 +414,8 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
if (ind_blocks || data_blocks)
rblocks += RES_STATFS + RES_QUOTA;
- error = gfs2_trans_begin(sdp, rblocks, 0);
+ error = gfs2_trans_begin(sdp, rblocks,
+ PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
if (error)
goto out_trans_fail;
@@ -616,58 +617,50 @@ static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
return dblock;
}
-static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
+static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
struct gfs2_bufdata *bd;
+ lock_buffer(bh);
gfs2_log_lock(sdp);
+ clear_buffer_dirty(bh);
bd = bh->b_private;
if (bd) {
- bd->bd_bh = NULL;
- bh->b_private = NULL;
- if (!bd->bd_ail && list_empty(&bd->bd_le.le_list))
- kmem_cache_free(gfs2_bufdata_cachep, bd);
+ if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
+ list_del_init(&bd->bd_le.le_list);
+ else
+ gfs2_remove_from_journal(bh, current->journal_info, 0);
}
- gfs2_log_unlock(sdp);
-
- lock_buffer(bh);
- clear_buffer_dirty(bh);
bh->b_bdev = NULL;
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
- clear_buffer_delay(bh);
+ gfs2_log_unlock(sdp);
unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
- struct buffer_head *head, *bh, *next;
- unsigned int curr_off = 0;
+ struct buffer_head *bh, *head;
+ unsigned long pos = 0;
BUG_ON(!PageLocked(page));
if (offset == 0)
ClearPageChecked(page);
if (!page_has_buffers(page))
- return;
+ goto out;
bh = head = page_buffers(page);
do {
- unsigned int next_off = curr_off + bh->b_size;
- next = bh->b_this_page;
-
- if (offset <= curr_off)
- discard_buffer(sdp, bh);
-
- curr_off = next_off;
- bh = next;
+ if (offset <= pos)
+ gfs2_discard(sdp, bh);
+ pos += bh->b_size;
+ bh = bh->b_this_page;
} while (bh != head);
-
- if (!offset)
+out:
+ if (offset == 0)
try_to_release_page(page, 0);
-
- return;
}
/**
@@ -736,59 +729,6 @@ out:
}
/**
- * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
- * @bh: the buffer we're stuck on
- *
- */
-
-static void stuck_releasepage(struct buffer_head *bh)
-{
- struct inode *inode = bh->b_page->mapping->host;
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
- struct gfs2_bufdata *bd = bh->b_private;
- struct gfs2_glock *gl;
-static unsigned limit = 0;
-
- if (limit > 3)
- return;
- limit++;
-
- fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
- fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
- (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
- fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
- fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");
-
- if (!bd)
- return;
-
- gl = bd->bd_gl;
-
- fs_warn(sdp, "gl = (%u, %llu)\n",
- gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);
-
- fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
- (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
- (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");
-
- if (gl->gl_ops == &gfs2_inode_glops) {
- struct gfs2_inode *ip = gl->gl_object;
- unsigned int x;
-
- if (!ip)
- return;
-
- fs_warn(sdp, "ip = %llu %llu\n",
- (unsigned long long)ip->i_no_formal_ino,
- (unsigned long long)ip->i_no_addr);
-
- for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
- fs_warn(sdp, "ip->i_cache[%u] = %s\n",
- x, (ip->i_cache[x]) ? "!NULL" : "NULL");
- }
-}
-
-/**
* gfs2_releasepage - free the metadata associated with a page
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
@@ -805,41 +745,39 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
- unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;
if (!page_has_buffers(page))
- goto out;
+ return 0;
+ gfs2_log_lock(sdp);
head = bh = page_buffers(page);
do {
- while (atomic_read(&bh->b_count)) {
- if (!atomic_read(&aspace->i_writecount))
- return 0;
-
- if (!(gfp_mask & __GFP_WAIT))
- return 0;
-
- if (time_after_eq(jiffies, t)) {
- stuck_releasepage(bh);
- /* should we withdraw here? */
- return 0;
- }
-
- yield();
- }
-
+ if (atomic_read(&bh->b_count))
+ goto cannot_release;
+ bd = bh->b_private;
+ if (bd && bd->bd_ail)
+ goto cannot_release;
gfs2_assert_warn(sdp, !buffer_pinned(bh));
gfs2_assert_warn(sdp, !buffer_dirty(bh));
+ bh = bh->b_this_page;
+ } while(bh != head);
+ gfs2_log_unlock(sdp);
+ head = bh = page_buffers(page);
+ do {
gfs2_log_lock(sdp);
bd = bh->b_private;
if (bd) {
gfs2_assert_warn(sdp, bd->bd_bh == bh);
gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
- gfs2_assert_warn(sdp, !bd->bd_ail);
- bd->bd_bh = NULL;
- if (!list_empty(&bd->bd_le.le_list))
- bd = NULL;
+ if (!list_empty(&bd->bd_le.le_list)) {
+ if (!buffer_pinned(bh))
+ list_del_init(&bd->bd_le.le_list);
+ else
+ bd = NULL;
+ }
+ if (bd)
+ bd->bd_bh = NULL;
bh->b_private = NULL;
}
gfs2_log_unlock(sdp);
@@ -849,8 +787,10 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bh = bh->b_this_page;
} while (bh != head);
-out:
return try_to_free_buffers(page);
+cannot_release:
+ gfs2_log_unlock(sdp);
+ return 0;
}
const struct address_space_operations gfs2_file_aops = {
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b8312edee0e..e2d1347796a 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -237,7 +237,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
inode = gfs2_inode_lookup(sb, DT_UNKNOWN,
inum->no_addr,
- 0);
+ 0, 0);
if (!inode)
goto fail;
if (IS_ERR(inode)) {
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 94d76ace0b9..46a9e10ff17 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -571,7 +571,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+ flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
+ | GL_FLOCK;
mutex_lock(&fp->f_fl_mutex);
@@ -579,21 +580,19 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (gl) {
if (fl_gh->gh_state == state)
goto out;
- gfs2_glock_hold(gl);
flock_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
- gfs2_glock_dq_uninit(fl_gh);
+ gfs2_glock_dq_wait(fl_gh);
+ gfs2_holder_reinit(state, flags, fl_gh);
} else {
error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
ip->i_no_addr, &gfs2_flock_glops,
CREATE, &gl);
if (error)
goto out;
+ gfs2_holder_init(gl, state, flags, fl_gh);
+ gfs2_glock_put(gl);
}
-
- gfs2_holder_init(gl, state, flags, fl_gh);
- gfs2_glock_put(gl);
-
error = gfs2_glock_nq(fl_gh);
if (error) {
gfs2_holder_uninit(fl_gh);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cf5aa505054..17de58e83d9 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -28,18 +28,18 @@
#include "lm.h"
#include "mount.h"
#include "ops_fstype.h"
+#include "ops_dentry.h"
#include "ops_super.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"
+#include "log.h"
#define DO 0
#define UNDO 1
-extern struct dentry_operations gfs2_dops;
-
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
@@ -82,13 +82,15 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
INIT_LIST_HEAD(&sdp->sd_log_le_rg);
INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
+ INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
mutex_init(&sdp->sd_log_reserve_mutex);
INIT_LIST_HEAD(&sdp->sd_ail1_list);
INIT_LIST_HEAD(&sdp->sd_ail2_list);
init_rwsem(&sdp->sd_log_flush_lock);
- INIT_LIST_HEAD(&sdp->sd_log_flush_list);
+ atomic_set(&sdp->sd_log_in_flight, 0);
+ init_waitqueue_head(&sdp->sd_log_flush_wait);
INIT_LIST_HEAD(&sdp->sd_revoke_list);
@@ -145,7 +147,8 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);
- while ((table = strchr(sdp->sd_table_name, '/')))
+ table = sdp->sd_table_name;
+ while ((table = strchr(table, '/')))
*table = '_';
out:
@@ -161,14 +164,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
if (undo)
goto fail_trans;
- p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
- error = IS_ERR(p);
- if (error) {
- fs_err(sdp, "can't start scand thread: %d\n", error);
- return error;
- }
- sdp->sd_scand_process = p;
-
for (sdp->sd_glockd_num = 0;
sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
sdp->sd_glockd_num++) {
@@ -229,14 +224,13 @@ fail:
while (sdp->sd_glockd_num--)
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
- kthread_stop(sdp->sd_scand_process);
return error;
}
static inline struct inode *gfs2_lookup_root(struct super_block *sb,
u64 no_addr)
{
- return gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ return gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
}
static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
@@ -301,8 +295,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
fs_err(sdp, "can't get root dentry\n");
error = -ENOMEM;
iput(inode);
- }
- sb->s_root->d_op = &gfs2_dops;
+ } else
+ sb->s_root->d_op = &gfs2_dops;
+
out:
gfs2_glock_dq_uninit(&sb_gh);
return error;
@@ -368,7 +363,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
+ LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
&sdp->sd_jinode_gh);
if (error) {
fs_err(sdp, "can't acquire journal inode glock: %d\n",
@@ -818,7 +813,6 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
struct nameidata nd;
struct file_system_type *fstype;
struct super_block *sb = NULL, *s;
- struct list_head *l;
int error;
error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
@@ -830,8 +824,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
error = vfs_getattr(nd.mnt, nd.dentry, &stat);
fstype = get_fs_type("gfs2");
- list_for_each(l, &fstype->fs_supers) {
- s = list_entry(l, struct super_block, s_instances);
+ list_for_each_entry(s, &fstype->fs_supers, s_instances) {
if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
(S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
sb = s;
@@ -861,7 +854,7 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
error = -ENOENT;
goto error;
}
- sdp = (struct gfs2_sbd*) sb->s_fs_info;
+ sdp = sb->s_fs_info;
if (sdp->sd_vfs_meta) {
printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n");
error = -EBUSY;
@@ -896,7 +889,10 @@ error:
static void gfs2_kill_sb(struct super_block *sb)
{
- gfs2_delete_debugfs_file(sb->s_fs_info);
+ if (sb->s_fs_info) {
+ gfs2_delete_debugfs_file(sb->s_fs_info);
+ gfs2_meta_syncfs(sb->s_fs_info);
+ }
kill_block_super(sb);
}
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 911c115b5c6..291f0c7eaa3 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -69,7 +69,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
break;
} else if (PTR_ERR(inode) != -EEXIST ||
- (nd->intent.open.flags & O_EXCL)) {
+ (nd && (nd->intent.open.flags & O_EXCL))) {
gfs2_holder_uninit(ghs);
return PTR_ERR(inode);
}
@@ -278,17 +278,25 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
- error = gfs2_glock_nq_m(3, ghs);
+ error = gfs2_glock_nq(ghs); /* parent */
if (error)
- goto out;
+ goto out_parent;
+
+ error = gfs2_glock_nq(ghs + 1); /* child */
+ if (error)
+ goto out_child;
+
+ error = gfs2_glock_nq(ghs + 2); /* rgrp */
+ if (error)
+ goto out_rgrp;
error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
if (error)
- goto out_gunlock;
+ goto out_rgrp;
error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0);
if (error)
- goto out_gunlock;
+ goto out_rgrp;
error = gfs2_dir_del(dip, &dentry->d_name);
if (error)
@@ -298,12 +306,15 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
out_end_trans:
gfs2_trans_end(sdp);
-out_gunlock:
- gfs2_glock_dq_m(3, ghs);
-out:
- gfs2_holder_uninit(ghs);
- gfs2_holder_uninit(ghs + 1);
+ gfs2_glock_dq(ghs + 2);
+out_rgrp:
gfs2_holder_uninit(ghs + 2);
+ gfs2_glock_dq(ghs + 1);
+out_child:
+ gfs2_holder_uninit(ghs + 1);
+ gfs2_glock_dq(ghs);
+out_parent:
+ gfs2_holder_uninit(ghs);
gfs2_glock_dq_uninit(&ri_gh);
return error;
}
@@ -894,12 +905,17 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
static int setattr_size(struct inode *inode, struct iattr *attr)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
int error;
if (attr->ia_size != ip->i_di.di_size) {
- error = vmtruncate(inode, attr->ia_size);
+ error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (error)
return error;
+ error = vmtruncate(inode, attr->ia_size);
+ gfs2_trans_end(sdp);
+ if (error)
+ return error;
}
error = gfs2_truncatei(ip, attr->ia_size);
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 603d940f115..950f31460e8 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -92,7 +92,6 @@ static void gfs2_put_super(struct super_block *sb)
kthread_stop(sdp->sd_recoverd_process);
while (sdp->sd_glockd_num--)
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
- kthread_stop(sdp->sd_scand_process);
if (!(sb->s_flags & MS_RDONLY)) {
error = gfs2_make_fs_ro(sdp);
@@ -456,12 +455,15 @@ static void gfs2_delete_inode(struct inode *inode)
}
error = gfs2_dinode_dealloc(ip);
- /*
- * Must do this before unlock to avoid trying to write back
- * potentially dirty data now that inode no longer exists
- * on disk.
- */
+ if (error)
+ goto out_unlock;
+
+ error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
+ if (error)
+ goto out_unlock;
+ /* Needs to be done before glock release & also in a transaction */
truncate_inode_pages(&inode->i_data, 0);
+ gfs2_trans_end(sdp);
out_unlock:
gfs2_glock_dq(&ip->i_iopen_gh);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6e546ee8f3d..addb51e0f13 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -70,6 +70,7 @@ struct gfs2_quota_host {
u64 qu_limit;
u64 qu_warn;
s64 qu_value;
+ u32 qu_ll_next;
};
struct gfs2_quota_change_host {
@@ -580,6 +581,7 @@ static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
qu->qu_limit = be64_to_cpu(str->qu_limit);
qu->qu_warn = be64_to_cpu(str->qu_warn);
qu->qu_value = be64_to_cpu(str->qu_value);
+ qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}
static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
@@ -589,6 +591,7 @@ static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
str->qu_limit = cpu_to_be64(qu->qu_limit);
str->qu_warn = cpu_to_be64(qu->qu_warn);
str->qu_value = cpu_to_be64(qu->qu_value);
+ str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}
@@ -614,6 +617,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 value;
int err = -EIO;
+ if (gfs2_is_stuffed(ip)) {
+ struct gfs2_alloc *al = NULL;
+ al = gfs2_alloc_get(ip);
+ /* just request 1 blk */
+ al->al_requested = 1;
+ gfs2_inplace_reserve(ip);
+ gfs2_unstuff_dinode(ip, NULL);
+ gfs2_inplace_release(ip);
+ gfs2_alloc_put(ip);
+ }
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 5ada38c99a2..beb6c7ac008 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -469,7 +469,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
};
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP, &ji_gh);
+ LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
if (error)
goto fail_gunlock_j;
} else {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index ce48c4594ec..708c287e1d0 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -31,6 +31,7 @@
#include "inode.h"
#define BFITNOENT ((u32)~0)
+#define NO_BLOCK ((u64)~0)
/*
* These routines are used by the resource group routines (rgrp.c)
@@ -116,8 +117,7 @@ static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
* @buffer: the buffer that holds the bitmaps
* @buflen: the length (in bytes) of the buffer
* @goal: start search at this block's bit-pair (within @buffer)
- * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
- * bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
+ * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
*
* Scope of @goal and returned block number is only within this bitmap buffer,
* not entire rgrp or filesystem. @buffer will be offset from the actual
@@ -137,9 +137,13 @@ static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
byte = buffer + (goal / GFS2_NBBY);
bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
end = buffer + buflen;
- alloc = (old_state & 1) ? 0 : 0x55;
+ alloc = (old_state == GFS2_BLKST_FREE) ? 0x55 : 0;
while (byte < end) {
+ /* If we're looking for a free block we can eliminate all
+ bitmap settings with 0x55, which represents four data
+ blocks in a row. If we're looking for a data block, we can
+ eliminate 0x00 which corresponds to four free blocks. */
if ((*byte & 0x55) == alloc) {
blk += (8 - bit) >> 1;
@@ -859,23 +863,28 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
{
struct inode *inode;
- u32 goal = 0;
+ u32 goal = 0, block;
u64 no_addr;
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
for(;;) {
if (goal >= rgd->rd_data)
break;
- goal = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
- GFS2_BLKST_UNLINKED);
- if (goal == BFITNOENT)
+ down_write(&sdp->sd_log_flush_lock);
+ block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
+ GFS2_BLKST_UNLINKED);
+ up_write(&sdp->sd_log_flush_lock);
+ if (block == BFITNOENT)
break;
- no_addr = goal + rgd->rd_data0;
+ /* rgblk_search can return a block < goal, so we need to
+ keep it marching forward. */
+ no_addr = block + rgd->rd_data0;
goal++;
- if (no_addr < *last_unlinked)
+ if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
*last_unlinked = no_addr;
inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
- no_addr, -1);
+ no_addr, -1, 1);
if (!IS_ERR(inode))
return inode;
}
@@ -1152,7 +1161,7 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
struct gfs2_alloc *al = &ip->i_alloc;
struct inode *inode;
int error = 0;
- u64 last_unlinked = 0;
+ u64 last_unlinked = NO_BLOCK;
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
@@ -1289,7 +1298,9 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
allocatable block anywhere else, we want to be able to wrap around and
search in the first part of our first-searched bit block. */
for (x = 0; x <= length; x++) {
- if (bi->bi_clone)
+ /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
+ bitmaps, so we must search the originals for that. */
+ if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
bi->bi_len, goal, old_state);
else
@@ -1305,9 +1316,7 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
goal = 0;
}
- if (old_state != new_state) {
- gfs2_assert_withdraw(rgd->rd_sbd, blk != BFITNOENT);
-
+ if (blk != BFITNOENT && old_state != new_state) {
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len, blk, new_state);
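For readers unfamiliar with the bitmap layout behind gfs2_bitfit(): each block is described by a two-bit pair, four pairs per byte (GFS2_NBBY), with the free state assumed to be 0, so masking a byte with 0x55 tests the low bit of all four pairs at once. The standalone sketch below (plain C, outside the kernel, simplified to the free/allocated distinction) illustrates how that lets a free-block search reject four allocated blocks per comparison; it is not the kernel routine itself.

#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch: find the first free (state 0) block in a GFS2-style
 * two-bits-per-block bitmap, skipping whole bytes whose four pairs are all
 * allocated.  Returns ~0 if nothing is found, analogous to BFITNOENT. */
static uint32_t find_free_block(const uint8_t *bitmap, size_t bytes)
{
	for (size_t i = 0; i < bytes; i++) {
		/* 0x55 under the mask means every pair has its low bit set,
		 * i.e. all four blocks are allocated: skip the byte. */
		if ((bitmap[i] & 0x55) == 0x55)
			continue;
		for (unsigned pair = 0; pair < 4; pair++)
			if (((bitmap[i] >> (pair * 2)) & 3) == 0)
				return (uint32_t)(i * 4 + pair);
	}
	return (uint32_t)~0;
}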
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a2da76b5ae4..dd3e737f528 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -58,7 +58,6 @@ void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_incore_log_blocks = 1024;
gt->gt_log_flush_secs = 60;
gt->gt_jindex_refresh_secs = 60;
- gt->gt_scand_secs = 15;
gt->gt_recoverd_secs = 60;
gt->gt_logd_secs = 1;
gt->gt_quotad_secs = 5;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c26c21b53c1..06e0b7768d9 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -222,7 +222,6 @@ static struct kobj_type gfs2_ktype = {
};
static struct kset gfs2_kset = {
- .kobj = {.name = "gfs2"},
.ktype = &gfs2_ktype,
};
@@ -442,7 +441,6 @@ TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(quota_cache_secs, 1);
TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
-TUNE_ATTR_DAEMON(scand_secs, scand_process);
TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
TUNE_ATTR_DAEMON(logd_secs, logd_process);
TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
@@ -464,7 +462,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_quota_cache_secs.attr,
&tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
- &tune_attr_scand_secs.attr,
&tune_attr_recoverd_secs.attr,
&tune_attr_logd_secs.attr,
&tune_attr_quotad_secs.attr,
@@ -553,6 +550,7 @@ int gfs2_sys_init(void)
{
gfs2_sys_margs = NULL;
spin_lock_init(&gfs2_sys_margs_lock);
+ kobject_set_name(&gfs2_kset.kobj, "gfs2");
kobj_set_kset_s(&gfs2_kset, fs_subsys);
return kset_register(&gfs2_kset);
}
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index f8dabf8446b..717983e2c2a 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -142,25 +142,25 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
lops_add(sdp, &bd->bd_le);
}
-void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno)
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
- struct gfs2_revoke *rv = kmalloc(sizeof(struct gfs2_revoke),
- GFP_NOFS | __GFP_NOFAIL);
- lops_init_le(&rv->rv_le, &gfs2_revoke_lops);
- rv->rv_blkno = blkno;
- lops_add(sdp, &rv->rv_le);
+ BUG_ON(!list_empty(&bd->bd_le.le_list));
+ BUG_ON(!list_empty(&bd->bd_ail_st_list));
+ BUG_ON(!list_empty(&bd->bd_ail_gl_list));
+ lops_init_le(&bd->bd_le, &gfs2_revoke_lops);
+ lops_add(sdp, &bd->bd_le);
}
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
{
- struct gfs2_revoke *rv;
+ struct gfs2_bufdata *bd;
int found = 0;
gfs2_log_lock(sdp);
- list_for_each_entry(rv, &sdp->sd_log_le_revoke, rv_le.le_list) {
- if (rv->rv_blkno == blkno) {
- list_del(&rv->rv_le.le_list);
+ list_for_each_entry(bd, &sdp->sd_log_le_revoke, bd_le.le_list) {
+ if (bd->bd_blkno == blkno) {
+ list_del_init(&bd->bd_le.le_list);
gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
sdp->sd_log_num_revoke--;
found = 1;
@@ -172,7 +172,7 @@ void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
if (found) {
struct gfs2_trans *tr = current->journal_info;
- kfree(rv);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
tr->tr_num_revoke_rm++;
}
}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 23d4cbe1de5..043d5f4b9c4 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -32,7 +32,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp);
void gfs2_trans_add_gl(struct gfs2_glock *gl);
void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
-void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno);
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno);
void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index af4ef808fa9..345798ebd36 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -17,6 +17,18 @@ ToDo/Notes:
happen is unclear however so it is worth waiting until someone hits
the problem.
+2.1.29 - Fix a deadlock at mount time.
+
+ - During mount the VFS holds s_umount lock on the superblock. So when
+ we try to empty the journal $LogFile contents by calling
+ ntfs_attr_set() when the machine does not have much memory and the
+ journal is large, ntfs_attr_set() results in the VM trying to balance
+ dirty pages, which in turn tries to take the s_umount lock and thus we
+ get a deadlock. The solution is to not use ntfs_attr_set() and
+ instead do the zeroing by hand at the block level rather than page
+ cache level.
+ - Fix sparse warnings.
+
2.1.28 - Fix a deadlock.
- Fix deadlock in fs/ntfs/inode.c::ntfs_put_inode(). Thanks to Sergey
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 82550838556..58b6be99254 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.28\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 6e5c2534f4b..cfdc7900d27 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -396,7 +396,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
- u8 *kaddr;
+ u8 *addr;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
unsigned long flags;
@@ -491,15 +491,15 @@ retry_readpage:
/* Race with shrinking truncate. */
attr_len = i_size;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page, KM_USER0);
/* Copy the data to the page. */
- memcpy(kaddr, (u8*)ctx->attr +
+ memcpy(addr, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
attr_len);
/* Zero the remainder of the page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr, KM_USER0);
put_unm_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
@@ -1344,7 +1344,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
loff_t i_size;
struct inode *vi = page->mapping->host;
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
- char *kaddr;
+ char *addr;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *m = NULL;
u32 attr_len;
@@ -1484,14 +1484,14 @@ retry_writepage:
/* Shrinking cannot fail. */
BUG_ON(err);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page, KM_USER0);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
- kaddr, attr_len);
+ addr, attr_len);
/* Zero out of bounds area in the page cache page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
- kunmap_atomic(kaddr, KM_USER0);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ kunmap_atomic(addr, KM_USER0);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
/* We are done with the page. */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 1c08fefe487..92dabdcf2b8 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1,7 +1,7 @@
/**
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -2500,7 +2500,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
struct page *page;
u8 *kaddr;
pgoff_t idx, end;
- unsigned int start_ofs, end_ofs, size;
+ unsigned start_ofs, end_ofs, size;
ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
(long long)ofs, (long long)cnt, val);
@@ -2548,6 +2548,8 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
page_cache_release(page);
+ balance_dirty_pages_ratelimited(mapping);
+ cond_resched();
if (idx == end)
goto done;
idx++;
@@ -2604,6 +2606,8 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
page_cache_release(page);
+ balance_dirty_pages_ratelimited(mapping);
+ cond_resched();
}
done:
ntfs_debug("Done.");
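The two attrib.c hunks above make ntfs_attr_set() throttle itself after each page it dirties, via balance_dirty_pages_ratelimited() and cond_resched(), instead of letting dirty pages accumulate unchecked across a long zeroing loop. A minimal sketch of the idiom follows; the fill_pages_with() wrapper is illustrative, while the page-cache, highmem and writeback calls are the ones already used in this file.

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Illustrative sketch of the dirty-page throttling idiom added above. */
static int fill_pages_with(struct address_space *mapping, pgoff_t start,
			   pgoff_t end, u8 val)
{
	pgoff_t idx;

	for (idx = start; idx <= end; idx++) {
		u8 *kaddr;
		struct page *page = read_mapping_page(mapping, idx, NULL);

		if (IS_ERR(page))
			return PTR_ERR(page);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, val, PAGE_CACHE_SIZE);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
		/* Throttle dirty-page production and stay preemptible. */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}
	return 0;
}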
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ffcc504a166..c814204d4ea 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -26,7 +26,6 @@
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <linux/sched.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -362,7 +361,7 @@ static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
volatile char c;
/* Set @end to the first byte outside the last page we care about. */
- end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);
+ end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
;
@@ -532,7 +531,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
blocksize_bits = vol->sb->s_blocksize_bits;
u = 0;
do {
- struct page *page = pages[u];
+ page = pages[u];
+ BUG_ON(!page);
/*
* create_empty_buffers() will create uptodate/dirty buffers if
* the page is uptodate/dirty.
@@ -1291,7 +1291,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t total = 0;
unsigned len;
int left;
@@ -1300,13 +1300,13 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
- kunmap_atomic(kaddr, KM_USER0);
+ addr = kmap_atomic(*pages, KM_USER0);
+ left = __copy_from_user_inatomic(addr + ofs, buf, len);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(left)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- left = __copy_from_user(kaddr + ofs, buf, len);
+ addr = kmap(*pages);
+ left = __copy_from_user(addr + ofs, buf, len);
kunmap(*pages);
if (unlikely(left))
goto err_out;
@@ -1408,26 +1408,26 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
size_t *iov_ofs, size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t copied, len, total = 0;
do {
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+ addr = kmap_atomic(*pages, KM_USER0);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(copied != len)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+ addr = kmap(*pages);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
/*
* Zero the rest of the target like __copy_from_user().
*/
- memset(kaddr + ofs + copied, 0, len - copied);
+ memset(addr + ofs + copied, 0, len - copied);
kunmap(*pages);
if (unlikely(copied != len))
goto err_out;
@@ -1735,8 +1735,6 @@ static int ntfs_commit_pages_after_write(struct page **pages,
read_unlock_irqrestore(&ni->size_lock, flags);
BUG_ON(initialized_size != i_size);
if (end > initialized_size) {
- unsigned long flags;
-
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = end;
i_size_write(vi, end);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index b532a730cec..e9da092e277 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -34,7 +34,6 @@
#include "dir.h"
#include "debug.h"
#include "inode.h"
-#include "attrib.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
@@ -2500,8 +2499,6 @@ retry_truncate:
/* Resize the attribute record to best fit the new attribute size. */
if (new_size < vol->mft_record_size &&
!ntfs_resident_attr_value_resize(m, a, new_size)) {
- unsigned long flags;
-
/* The resize succeeded! */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index acfed325f4e..d7932e95b1f 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -1,7 +1,7 @@
/*
* logfile.c - NTFS kernel journal handling. Part of the Linux-NTFS project.
*
- * Copyright (c) 2002-2005 Anton Altaparmakov
+ * Copyright (c) 2002-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -724,24 +724,139 @@ bool ntfs_is_logfile_clean(struct inode *log_vi, const RESTART_PAGE_HEADER *rp)
*/
bool ntfs_empty_logfile(struct inode *log_vi)
{
- ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
+ VCN vcn, end_vcn;
+ ntfs_inode *log_ni = NTFS_I(log_vi);
+ ntfs_volume *vol = log_ni->vol;
+ struct super_block *sb = vol->sb;
+ runlist_element *rl;
+ unsigned long flags;
+ unsigned block_size, block_size_bits;
+ int err;
+ bool should_wait = true;
ntfs_debug("Entering.");
- if (!NVolLogFileEmpty(vol)) {
- int err;
-
- err = ntfs_attr_set(NTFS_I(log_vi), 0, i_size_read(log_vi),
- 0xff);
- if (unlikely(err)) {
- ntfs_error(vol->sb, "Failed to fill $LogFile with "
- "0xff bytes (error code %i).", err);
- return false;
- }
- /* Set the flag so we do not have to do it again on remount. */
- NVolSetLogFileEmpty(vol);
+ if (NVolLogFileEmpty(vol)) {
+ ntfs_debug("Done.");
+ return true;
}
+ /*
+ * We cannot use ntfs_attr_set() because we may be still in the middle
+ * of a mount operation. Thus we do the emptying by hand by first
+ * zapping the page cache pages for the $LogFile/$DATA attribute and
+ * then emptying each of the buffers in each of the clusters specified
+ * by the runlist by hand.
+ */
+ block_size = sb->s_blocksize;
+ block_size_bits = sb->s_blocksize_bits;
+ vcn = 0;
+ read_lock_irqsave(&log_ni->size_lock, flags);
+ end_vcn = (log_ni->initialized_size + vol->cluster_size_mask) >>
+ vol->cluster_size_bits;
+ read_unlock_irqrestore(&log_ni->size_lock, flags);
+ truncate_inode_pages(log_vi->i_mapping, 0);
+ down_write(&log_ni->runlist.lock);
+ rl = log_ni->runlist.rl;
+ if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {
+map_vcn:
+ err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);
+ if (err) {
+ ntfs_error(sb, "Failed to map runlist fragment (error "
+ "%d).", -err);
+ goto err;
+ }
+ rl = log_ni->runlist.rl;
+ BUG_ON(!rl || vcn < rl->vcn || !rl->length);
+ }
+ /* Seek to the runlist element containing @vcn. */
+ while (rl->length && vcn >= rl[1].vcn)
+ rl++;
+ do {
+ LCN lcn;
+ sector_t block, end_block;
+ s64 len;
+
+ /*
+ * If this run is not mapped map it now and start again as the
+ * runlist will have been updated.
+ */
+ lcn = rl->lcn;
+ if (unlikely(lcn == LCN_RL_NOT_MAPPED)) {
+ vcn = rl->vcn;
+ goto map_vcn;
+ }
+ /* If this run is not valid abort with an error. */
+ if (unlikely(!rl->length || lcn < LCN_HOLE))
+ goto rl_err;
+ /* Skip holes. */
+ if (lcn == LCN_HOLE)
+ continue;
+ block = lcn << vol->cluster_size_bits >> block_size_bits;
+ len = rl->length;
+ if (rl[1].vcn > end_vcn)
+ len = end_vcn - rl->vcn;
+ end_block = (lcn + len) << vol->cluster_size_bits >>
+ block_size_bits;
+ /* Iterate over the blocks in the run and empty them. */
+ do {
+ struct buffer_head *bh;
+
+ /* Obtain the buffer, possibly not uptodate. */
+ bh = sb_getblk(sb, block);
+ BUG_ON(!bh);
+ /* Setup buffer i/o submission. */
+ lock_buffer(bh);
+ bh->b_end_io = end_buffer_write_sync;
+ get_bh(bh);
+ /* Set the entire contents of the buffer to 0xff. */
+ memset(bh->b_data, -1, block_size);
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ if (buffer_dirty(bh))
+ clear_buffer_dirty(bh);
+ /*
+ * Submit the buffer and wait for i/o to complete but
+ * only for the first buffer so we do not miss really
+ * serious i/o errors. Once the first buffer has
+ * completed ignore errors afterwards as we can assume
+ * that if one buffer worked all of them will work.
+ */
+ submit_bh(WRITE, bh);
+ if (should_wait) {
+ should_wait = false;
+ wait_on_buffer(bh);
+ if (unlikely(!buffer_uptodate(bh)))
+ goto io_err;
+ }
+ brelse(bh);
+ } while (++block < end_block);
+ } while ((++rl)->vcn < end_vcn);
+ up_write(&log_ni->runlist.lock);
+ /*
+ * Zap the pages again just in case any got instantiated whilst we were
+ * emptying the blocks by hand. FIXME: We may not have completed
+ * writing to all the buffer heads yet so this may happen too early.
+ * We really should use a kernel thread to do the emptying
+ * asynchronously and then we can also set the volume dirty and output
+ * an error message if emptying should fail.
+ */
+ truncate_inode_pages(log_vi->i_mapping, 0);
+ /* Set the flag so we do not have to do it again on remount. */
+ NVolSetLogFileEmpty(vol);
ntfs_debug("Done.");
return true;
+io_err:
+ ntfs_error(sb, "Failed to write buffer. Unmount and run chkdsk.");
+ goto dirty_err;
+rl_err:
+ ntfs_error(sb, "Runlist is corrupt. Unmount and run chkdsk.");
+dirty_err:
+ NVolSetErrors(vol);
+ err = -EIO;
+err:
+ up_write(&log_ni->runlist.lock);
+ ntfs_error(sb, "Failed to fill $LogFile with 0xff bytes (error %d).",
+ -err);
+ return false;
}
#endif /* NTFS_RW */
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 9afd72c7ad0..56a9a6d25a2 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -1,7 +1,7 @@
/**
* runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002-2005 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -1714,7 +1714,7 @@ extend_hole:
sizeof(*rl));
/* Adjust the beginning of the tail if necessary. */
if (end > rl->vcn) {
- s64 delta = end - rl->vcn;
+ delta = end - rl->vcn;
rl->vcn = end;
rl->length -= delta;
/* Only adjust the lcn if it is real. */
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 778a850b463..4ba7f0bdc24 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -354,7 +354,6 @@ struct ocfs2_insert_type {
enum ocfs2_append_type ins_appending;
enum ocfs2_contig_type ins_contig;
int ins_contig_index;
- int ins_free_records;
int ins_tree_depth;
};
@@ -362,7 +361,6 @@ struct ocfs2_merge_ctxt {
enum ocfs2_contig_type c_contig_type;
int c_has_empty_extent;
int c_split_covers_rec;
- int c_used_tail_recs;
};
/*
@@ -2808,36 +2806,28 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
struct ocfs2_merge_ctxt *ctxt)
{
- int ret = 0, delete_tail_recs = 0;
+ int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(left_path);
struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
- if (ctxt->c_split_covers_rec) {
- delete_tail_recs++;
-
- if (ctxt->c_contig_type == CONTIG_LEFTRIGHT ||
- ctxt->c_has_empty_extent)
- delete_tail_recs++;
-
- if (ctxt->c_has_empty_extent) {
- /*
- * The merge code will need to create an empty
- * extent to take the place of the newly
- * emptied slot. Remove any pre-existing empty
- * extents - having more than one in a leaf is
- * illegal.
- */
- ret = ocfs2_rotate_tree_left(inode, handle, left_path,
- dealloc);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- split_index--;
- rec = &el->l_recs[split_index];
+ if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
+ /*
+ * The merge code will need to create an empty
+ * extent to take the place of the newly
+ * emptied slot. Remove any pre-existing empty
+ * extents - having more than one in a leaf is
+ * illegal.
+ */
+ ret = ocfs2_rotate_tree_left(inode, handle, left_path,
+ dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
+ split_index--;
+ rec = &el->l_recs[split_index];
}
if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
@@ -3593,6 +3583,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
struct buffer_head *di_bh,
struct buffer_head **last_eb_bh,
struct ocfs2_extent_rec *insert_rec,
+ int *free_records,
struct ocfs2_insert_type *insert)
{
int ret;
@@ -3633,7 +3624,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
* XXX: This test is simplistic, we can search for empty
* extent records too.
*/
- insert->ins_free_records = le16_to_cpu(el->l_count) -
+ *free_records = le16_to_cpu(el->l_count) -
le16_to_cpu(el->l_next_free_rec);
if (!insert->ins_tree_depth) {
@@ -3730,10 +3721,13 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
struct ocfs2_alloc_context *meta_ac)
{
int status;
+ int uninitialized_var(free_records);
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
+ BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
+
mlog(0, "add %u clusters at position %u to inode %llu\n",
new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -3752,7 +3746,7 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
rec.e_flags = flags;
status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec,
- &insert);
+ &free_records, &insert);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -3762,9 +3756,9 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
"Insert.contig_index: %d, Insert.free_records: %d, "
"Insert.tree_depth: %d\n",
insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
- insert.ins_free_records, insert.ins_tree_depth);
+ free_records, insert.ins_tree_depth);
- if (insert.ins_contig == CONTIG_NONE && insert.ins_free_records == 0) {
+ if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
status = ocfs2_grow_tree(inode, handle, fe_bh,
&insert.ins_tree_depth, &last_eb_bh,
meta_ac);
@@ -3847,26 +3841,17 @@ leftright:
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- int old_depth = depth;
-
ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
-
- if (old_depth != depth) {
- eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
- rightmost_el = &eb->h_list;
- }
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
- insert.ins_free_records = le16_to_cpu(rightmost_el->l_count)
- - le16_to_cpu(rightmost_el->l_next_free_rec);
insert.ins_tree_depth = depth;
insert_range = le32_to_cpu(split_rec.e_cpos) +
@@ -4015,11 +4000,6 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
} else
rightmost_el = path_root_el(path);
- ctxt.c_used_tail_recs = le16_to_cpu(rightmost_el->l_next_free_rec);
- if (ctxt.c_used_tail_recs > 0 &&
- ocfs2_is_empty_extent(&rightmost_el->l_recs[0]))
- ctxt.c_used_tail_recs--;
-
if (rec->e_cpos == split_rec->e_cpos &&
rec->e_leaf_clusters == split_rec->e_leaf_clusters)
ctxt.c_split_covers_rec = 1;
@@ -4028,10 +4008,9 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
- mlog(0, "index: %d, contig: %u, used_tail_recs: %u, "
- "has_empty: %u, split_covers: %u\n", split_index,
- ctxt.c_contig_type, ctxt.c_used_tail_recs,
- ctxt.c_has_empty_extent, ctxt.c_split_covers_rec);
+ mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
+ split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
+ ctxt.c_split_covers_rec);
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
@@ -4180,27 +4159,18 @@ static int ocfs2_split_tree(struct inode *inode, struct buffer_head *di_bh,
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- int old_depth = depth;
-
ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, &last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
-
- if (old_depth != depth) {
- eb = (struct ocfs2_extent_block *)last_eb_bh->b_data;
- rightmost_el = &eb->h_list;
- }
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
insert.ins_split = SPLIT_RIGHT;
- insert.ins_free_records = le16_to_cpu(rightmost_el->l_count)
- - le16_to_cpu(rightmost_el->l_next_free_rec);
insert.ins_tree_depth = depth;
ret = ocfs2_do_insert_extent(inode, handle, di_bh, &split_rec, &insert);
@@ -5665,12 +5635,50 @@ static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
return ocfs2_journal_dirty_data(handle, bh);
}
+static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
+ unsigned int from, unsigned int to,
+ struct page *page, int zero, u64 *phys)
+{
+ int ret, partial = 0;
+
+ ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
+ if (ret)
+ mlog_errno(ret);
+
+ if (zero)
+ zero_user_page(page, from, to - from, KM_USER0);
+
+ /*
+ * Need to set the buffers we zero'd into uptodate
+ * here if they aren't - ocfs2_map_page_blocks()
+ * might've skipped some
+ */
+ if (ocfs2_should_order_data(inode)) {
+ ret = walk_page_buffers(handle,
+ page_buffers(page),
+ from, to, &partial,
+ ocfs2_ordered_zero_func);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else {
+ ret = walk_page_buffers(handle, page_buffers(page),
+ from, to, &partial,
+ ocfs2_writeback_zero_func);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+ if (!partial)
+ SetPageUptodate(page);
+
+ flush_dcache_page(page);
+}
+
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
loff_t end, struct page **pages,
int numpages, u64 phys, handle_t *handle)
{
- int i, ret, partial = 0;
- void *kaddr;
+ int i;
struct page *page;
unsigned int from, to = PAGE_CACHE_SIZE;
struct super_block *sb = inode->i_sb;
@@ -5691,87 +5699,31 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
BUG_ON(from > PAGE_CACHE_SIZE);
BUG_ON(to > PAGE_CACHE_SIZE);
- ret = ocfs2_map_page_blocks(page, &phys, inode, from, to, 0);
- if (ret)
- mlog_errno(ret);
-
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + from, 0, to - from);
- kunmap_atomic(kaddr, KM_USER0);
-
- /*
- * Need to set the buffers we zero'd into uptodate
- * here if they aren't - ocfs2_map_page_blocks()
- * might've skipped some
- */
- if (ocfs2_should_order_data(inode)) {
- ret = walk_page_buffers(handle,
- page_buffers(page),
- from, to, &partial,
- ocfs2_ordered_zero_func);
- if (ret < 0)
- mlog_errno(ret);
- } else {
- ret = walk_page_buffers(handle, page_buffers(page),
- from, to, &partial,
- ocfs2_writeback_zero_func);
- if (ret < 0)
- mlog_errno(ret);
- }
-
- if (!partial)
- SetPageUptodate(page);
-
- flush_dcache_page(page);
+ ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
+ &phys);
start = (page->index + 1) << PAGE_CACHE_SHIFT;
}
out:
- if (pages) {
- for (i = 0; i < numpages; i++) {
- page = pages[i];
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
- }
- }
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, numpages);
}
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num, u64 *phys)
+ struct page **pages, int *num)
{
- int i, numpages = 0, ret = 0;
- unsigned int ext_flags;
+ int numpages, ret = 0;
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
- BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
BUG_ON(start > end);
- if (start == end)
- goto out;
-
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
- ret = ocfs2_extent_map_get_blocks(inode, start >> sb->s_blocksize_bits,
- phys, NULL, &ext_flags);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- /* Tail is a hole. */
- if (*phys == 0)
- goto out;
-
- /* Tail is marked as unwritten, we can count on write to zero
- * in that case. */
- if (ext_flags & OCFS2_EXT_UNWRITTEN)
- goto out;
-
+ numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
do {
@@ -5788,14 +5740,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
out:
if (ret != 0) {
- if (pages) {
- for (i = 0; i < numpages; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- page_cache_release(pages[i]);
- }
- }
- }
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, numpages);
numpages = 0;
}
@@ -5816,18 +5762,20 @@ out:
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
- int ret, numpages;
+ int ret = 0, numpages;
struct page **pages = NULL;
u64 phys;
+ unsigned int ext_flags;
+ struct super_block *sb = inode->i_sb;
/*
* File systems which don't support sparse files zero on every
* extend.
*/
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
return 0;
- pages = kcalloc(ocfs2_pages_per_cluster(inode->i_sb),
+ pages = kcalloc(ocfs2_pages_per_cluster(sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
@@ -5835,16 +5783,31 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
- &numpages, &phys);
+ if (range_start == range_end)
+ goto out;
+
+ ret = ocfs2_extent_map_get_blocks(inode,
+ range_start >> sb->s_blocksize_bits,
+ &phys, NULL, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (numpages == 0)
+ /*
+ * Tail is a hole, or is marked unwritten. In either case, we
+ * can count on read and write to return/push zeros.
+ */
+ if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
+ ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
+ &numpages);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
numpages, phys, handle);
@@ -5865,6 +5828,178 @@ out:
return ret;
}
+static void ocfs2_zero_dinode_id2(struct inode *inode, struct ocfs2_dinode *di)
+{
+ unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
+
+ memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2));
+}
+
+void ocfs2_dinode_new_extent_list(struct inode *inode,
+ struct ocfs2_dinode *di)
+{
+ ocfs2_zero_dinode_id2(inode, di);
+ di->id2.i_list.l_tree_depth = 0;
+ di->id2.i_list.l_next_free_rec = 0;
+ di->id2.i_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(inode->i_sb));
+}
+
+void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ /*
+ * We clear the entire i_data structure here so that all
+ * fields can be properly initialized.
+ */
+ ocfs2_zero_dinode_id2(inode, di);
+
+ idata->id_count = cpu_to_le16(ocfs2_max_inline_data(inode->i_sb));
+}
+
+int ocfs2_convert_inline_data_to_extents(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret, i, has_data, num_pages = 0;
+ handle_t *handle;
+ u64 uninitialized_var(block);
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_alloc_context *data_ac = NULL;
+ struct page **pages = NULL;
+ loff_t end = osb->s_clustersize;
+
+ has_data = i_size_read(inode) ? 1 : 0;
+
+ if (has_data) {
+ pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
+ sizeof(struct page *), GFP_NOFS);
+ if (pages == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ if (has_data) {
+ u32 bit_off, num;
+ unsigned int page_end;
+ u64 phys;
+
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ &num);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Save two copies, one for insert, and one that can
+ * be changed by ocfs2_map_and_dirty_page() below.
+ */
+ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
+
+ /*
+ * Non sparse file systems zero on extend, so no need
+ * to do that now.
+ */
+ if (!ocfs2_sparse_alloc(osb) &&
+ PAGE_CACHE_SIZE < osb->s_clustersize)
+ end = PAGE_CACHE_SIZE;
+
+ ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * This should populate the 1st page for us and mark
+ * it up to date.
+ */
+ ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ page_end = PAGE_CACHE_SIZE;
+ if (PAGE_CACHE_SIZE > osb->s_clustersize)
+ page_end = osb->s_clustersize;
+
+ for (i = 0; i < num_pages; i++)
+ ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
+ pages[i], i > 0, &phys);
+ }
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ocfs2_dinode_new_extent_list(inode, di);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+ if (has_data) {
+ /*
+ * An error at this point should be extremely rare. If
+ * this proves to be false, we could always re-build
+ * the in-inode data from our pages.
+ */
+ ret = ocfs2_insert_extent(osb, handle, inode, di_bh,
+ 0, block, 1, 0, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out_unlock:
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+
+out:
+ if (pages) {
+ ocfs2_unlock_and_free_pages(pages, num_pages);
+ kfree(pages);
+ }
+
+ return ret;
+}
+
/*
* It is expected, that by the time you call this function,
* inode->i_size and fe->i_size have been adjusted.
@@ -6090,6 +6225,81 @@ bail:
return status;
}
+/*
+ * 'start' is inclusive, 'end' is not.
+ */
+int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ unsigned int start, unsigned int end, int trunc)
+{
+ int ret;
+ unsigned int numbytes;
+ handle_t *handle;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+
+ if (end > i_size_read(inode))
+ end = i_size_read(inode);
+
+ BUG_ON(start >= end);
+
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
+ !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
+ !ocfs2_supports_inline_data(osb)) {
+ ocfs2_error(inode->i_sb,
+ "Inline data flags for inode %llu don't agree! "
+ "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ le16_to_cpu(di->i_dyn_features),
+ OCFS2_I(inode)->ip_dyn_features,
+ osb->s_feature_incompat);
+ ret = -EROFS;
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ numbytes = end - start;
+ memset(idata->id_data + start, 0, numbytes);
+
+ /*
+ * No need to worry about the data page here - it's been
+ * truncated already and inline data doesn't need it for
+ * pushing zero's to disk, so we'll let readpage pick it up
+ * later.
+ */
+ if (trunc) {
+ i_size_write(inode, start);
+ di->i_size = cpu_to_le64(start);
+ }
+
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ return ret;
+}
+
static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
{
/*
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 990df48ae8d..42ff94bd801 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -62,6 +62,11 @@ static inline int ocfs2_extend_meta_needed(struct ocfs2_dinode *fe)
return le16_to_cpu(fe->id2.i_list.l_tree_depth) + 2;
}
+void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di);
+void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di);
+int ocfs2_convert_inline_data_to_extents(struct inode *inode,
+ struct buffer_head *di_bh);
+
int ocfs2_truncate_log_init(struct ocfs2_super *osb);
void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb);
void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
@@ -115,6 +120,8 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_truncate_context *tc);
+int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ unsigned int start, unsigned int end, int trunc);
int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
u32 cpos, struct buffer_head **leaf_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f37f25c931f..34d10452c56 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -206,9 +206,70 @@ bail:
return err;
}
+int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+ struct buffer_head *di_bh)
+{
+ void *kaddr;
+ unsigned int size;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
+ ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EROFS;
+ }
+
+ size = i_size_read(inode);
+
+ if (size > PAGE_CACHE_SIZE ||
+ size > ocfs2_max_inline_data(inode->i_sb)) {
+ ocfs2_error(inode->i_sb,
+ "Inode %llu has with inline data has bad size: %u",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, size);
+ return -EROFS;
+ }
+
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (size)
+ memcpy(kaddr, di->id2.i_data.id_data, size);
+ /* Clear the remaining part of the page */
+ memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ SetPageUptodate(page);
+
+ return 0;
+}
+
+static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
+
+ ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_read_inline_data(inode, page, di_bh);
+out:
+ unlock_page(page);
+
+ brelse(di_bh);
+ return ret;
+}
+
static int ocfs2_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
int ret, unlock = 1;
@@ -222,7 +283,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
goto out;
}
- if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
+ if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
ret = AOP_TRUNCATED_PAGE;
goto out_meta_unlock;
}
@@ -252,7 +313,10 @@ static int ocfs2_readpage(struct file *file, struct page *page)
goto out_alloc;
}
- ret = block_read_full_page(page, ocfs2_get_block);
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ ret = ocfs2_readpage_inline(inode, page);
+ else
+ ret = block_read_full_page(page, ocfs2_get_block);
unlock = 0;
ocfs2_data_unlock(inode, 0);
@@ -301,12 +365,8 @@ int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
{
int ret;
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
ret = block_prepare_write(page, from, to, ocfs2_get_block);
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
-
return ret;
}
@@ -401,7 +461,9 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
down_read(&OCFS2_I(inode)->ip_alloc_sem);
}
- err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
+ err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
+ NULL);
if (!INODE_JOURNAL(inode)) {
up_read(&OCFS2_I(inode)->ip_alloc_sem);
@@ -415,7 +477,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
goto bail;
}
-
bail:
status = err ? 0 : p_blkno;
@@ -570,6 +631,13 @@ static ssize_t ocfs2_direct_IO(int rw,
mlog_entry_void();
+ /*
+ * Fallback to buffered I/O if we see an inode without
+ * extents.
+ */
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return 0;
+
if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
/*
* We get PR data locks even for O_DIRECT. This
@@ -834,18 +902,22 @@ struct ocfs2_write_ctxt {
struct ocfs2_cached_dealloc_ctxt w_dealloc;
};
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
int i;
- for(i = 0; i < wc->w_num_pages; i++) {
- if (wc->w_pages[i] == NULL)
- continue;
-
- unlock_page(wc->w_pages[i]);
- mark_page_accessed(wc->w_pages[i]);
- page_cache_release(wc->w_pages[i]);
+ for(i = 0; i < num_pages; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ mark_page_accessed(pages[i]);
+ page_cache_release(pages[i]);
+ }
}
+}
+
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+ ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
brelse(wc->w_di_bh);
kfree(wc);
@@ -1360,6 +1432,160 @@ out:
return ret;
}
+static int ocfs2_write_begin_inline(struct address_space *mapping,
+ struct inode *inode,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct page *page;
+ handle_t *handle;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
+
+ page = find_or_create_page(mapping, 0, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+ /*
+ * If we don't set w_num_pages then this page won't get unlocked
+ * and freed on cleanup of the write context.
+ */
+ wc->w_pages[0] = wc->w_target_page = page;
+ wc->w_num_pages = 1;
+
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ ocfs2_commit_trans(osb, handle);
+
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
+ ocfs2_set_inode_data_inline(inode, di);
+
+ if (!PageUptodate(page)) {
+ ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
+ if (ret) {
+ ocfs2_commit_trans(osb, handle);
+
+ goto out;
+ }
+ }
+
+ wc->w_handle = handle;
+out:
+ return ret;
+}
+
+int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ if (new_size < le16_to_cpu(di->id2.i_data.id_count))
+ return 1;
+ return 0;
+}
+
+static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode, loff_t pos,
+ unsigned len, struct page *mmap_page,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret, written = 0;
+ loff_t end = pos + len;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
+ (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
+ oi->ip_dyn_features);
+
+ /*
+ * Handle inodes which already have inline data 1st.
+ */
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (mmap_page == NULL &&
+ ocfs2_size_fits_inline_data(wc->w_di_bh, end))
+ goto do_inline_write;
+
+ /*
+ * The write won't fit - we have to give this inode an
+ * inline extent list now.
+ */
+ ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
+ if (ret)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Check whether the inode can accept inline data.
+ */
+ if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
+ return 0;
+
+ /*
+ * Check whether the write can fit.
+ */
+ if (mmap_page || end > ocfs2_max_inline_data(inode->i_sb))
+ return 0;
+
+do_inline_write:
+ ret = ocfs2_write_begin_inline(mapping, inode, wc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * This signals to the caller that the data can be written
+ * inline.
+ */
+ written = 1;
+out:
+ return written ? written : ret;
+}
+
+/*
+ * This function only does anything for file systems which can't
+ * handle sparse files.
+ *
+ * What we want to do here is fill in any hole between the current end
+ * of allocation and the end of our write. That way the rest of the
+ * write path can treat it as a non-allocating write, which has no
+ * special case code for sparse/nonsparse files.
+ */
+static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
+ unsigned len,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ loff_t newsize = pos + len;
+
+ if (ocfs2_sparse_alloc(osb))
+ return 0;
+
+ if (newsize <= i_size_read(inode))
+ return 0;
+
+ ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+ if (ret)
+ mlog_errno(ret);
+
+ return ret;
+}
+
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
@@ -1381,6 +1607,25 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
return ret;
}
+ if (ocfs2_supports_inline_data(osb)) {
+ ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
+ mmap_page, wc);
+ if (ret == 1) {
+ ret = 0;
+ goto success;
+ }
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
&extents_to_split);
if (ret) {
@@ -1462,6 +1707,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
+success:
*pagep = wc->w_target_page;
*fsdata = wc;
return 0;
@@ -1529,6 +1775,31 @@ out_fail:
return ret;
}
+static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
+ unsigned len, unsigned *copied,
+ struct ocfs2_dinode *di,
+ struct ocfs2_write_ctxt *wc)
+{
+ void *kaddr;
+
+ if (unlikely(*copied < len)) {
+ if (!PageUptodate(wc->w_target_page)) {
+ *copied = 0;
+ return;
+ }
+ }
+
+ kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
+ memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ mlog(0, "Data written to inode at offset %llu. "
+ "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
+ (unsigned long long)pos, *copied,
+ le16_to_cpu(di->id2.i_data.id_count),
+ le16_to_cpu(di->i_dyn_features));
+}
+
int ocfs2_write_end_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
@@ -1542,6 +1813,11 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
handle_t *handle = wc->w_handle;
struct page *tmppage;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
+ goto out_write_size;
+ }
+
if (unlikely(copied < len)) {
if (!PageUptodate(wc->w_target_page))
copied = 0;
@@ -1579,6 +1855,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
block_commit_write(tmppage, from, to);
}
+out_write_size:
pos += copied;
if (pos > inode->i_size) {
i_size_write(inode, pos);
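The write_begin changes above funnel every write through one decision in ocfs2_try_to_write_inline_data(): keep the data inline in the dinode while it fits, convert to an extent list when it no longer does, and fall through to the normal extent path otherwise. Stripped of the journaling, the decision reduces to roughly the sketch below; the standalone function and its parameters are illustrative only and are not part of the patch (the real checks go through ocfs2_size_fits_inline_data() and ocfs2_max_inline_data()).

#include <stdint.h>

enum inline_write_action {
	WRITE_INLINE,		/* copy bytes straight into the dinode */
	CONVERT_TO_EXTENTS,	/* push existing inline data out first */
	WRITE_EXTENTS,		/* ordinary extent-based write path */
};

/* Illustrative decision helper mirroring ocfs2_try_to_write_inline_data(). */
static enum inline_write_action classify_write(int has_inline_data,
					       int has_clusters,
					       uint64_t i_size, uint64_t pos,
					       unsigned int len, int is_mmap,
					       unsigned int max_inline)
{
	uint64_t end = pos + len;

	if (has_inline_data) {
		/* Already inline: stay inline while the write fits and is
		 * not arriving through the mmap path. */
		if (!is_mmap && end < max_inline)
			return WRITE_INLINE;
		return CONVERT_TO_EXTENTS;
	}

	/* Only brand-new, empty inodes may become inline-data inodes. */
	if (has_clusters || i_size != 0)
		return WRITE_EXTENTS;
	if (is_mmap || end > max_inline)
		return WRITE_EXTENTS;
	return WRITE_INLINE;
}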
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 389579bd64e..113560877db 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -34,6 +34,8 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new);
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
unsigned from,
@@ -59,6 +61,10 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
struct page **pagep, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page);
+int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+ struct buffer_head *di_bh);
+int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
+
/* all ocfs2_dio_end_io()'s fault */
#define ocfs2_iocb_is_rw_locked(iocb) \
test_bit(0, (unsigned long *)&iocb->private)
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index e9e042b93db..a4882c8df94 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -143,7 +143,7 @@ static struct kobj_type mlog_ktype = {
};
static struct kset mlog_kset = {
- .kobj = {.name = "logmask", .ktype = &mlog_ktype},
+ .kobj = {.ktype = &mlog_ktype},
};
int mlog_sys_init(struct kset *o2cb_subsys)
@@ -156,6 +156,7 @@ int mlog_sys_init(struct kset *o2cb_subsys)
}
mlog_attr_ptrs[i] = NULL;
+ kobject_set_name(&mlog_kset.kobj, "logmask");
kobj_set_kset_s(&mlog_kset, *o2cb_subsys);
return kset_register(&mlog_kset);
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 0d5fdde959c..7453b70c1a1 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -55,10 +55,16 @@
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
+#include "super.h"
#include "uptodate.h"
#include "buffer_head_io.h"
+#define NAMEI_RA_CHUNKS 2
+#define NAMEI_RA_BLOCKS 4
+#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
static unsigned char ocfs2_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
@@ -66,12 +72,614 @@ static unsigned char ocfs2_filetype_table[] = {
static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
+ unsigned int blocks_wanted,
struct buffer_head **new_de_bh);
+static int ocfs2_do_extend_dir(struct super_block *sb,
+ handle_t *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh);
+
/*
- * ocfs2_readdir()
+ * bh passed here can be an inode block or a dir data block, depending
+ * on the inode inline data flag.
+ */
+static int ocfs2_check_dir_entry(struct inode * dir,
+ struct ocfs2_dir_entry * de,
+ struct buffer_head * bh,
+ unsigned long offset)
+{
+ const char *error_msg = NULL;
+ const int rlen = le16_to_cpu(de->rec_len);
+
+ if (rlen < OCFS2_DIR_REC_LEN(1))
+ error_msg = "rec_len is smaller than minimal";
+ else if (rlen % 4 != 0)
+ error_msg = "rec_len % 4 != 0";
+ else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ error_msg = "rec_len is too small for name_len";
+ else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ error_msg = "directory entry across blocks";
+
+ if (error_msg != NULL)
+ mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
+ "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
+ offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
+ de->name_len);
+ return error_msg == NULL ? 1 : 0;
+}
+
+static inline int ocfs2_match(int len,
+ const char * const name,
+ struct ocfs2_dir_entry *de)
+{
+ if (len != de->name_len)
+ return 0;
+ if (!de->inode)
+ return 0;
+ return !memcmp(name, de->name, len);
+}
+
+/*
+ * Returns 0 if not found, -1 on failure, and 1 on success
+ */
+static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+ struct inode *dir,
+ const char *name, int namelen,
+ unsigned long offset,
+ char *first_de,
+ unsigned int bytes,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct ocfs2_dir_entry *de;
+ char *dlimit, *de_buf;
+ int de_len;
+ int ret = 0;
+
+ mlog_entry_void();
+
+ de_buf = first_de;
+ dlimit = de_buf + bytes;
+
+ while (de_buf < dlimit) {
+ /* this code is executed quadratically often */
+ /* do minimal checking `by hand' */
+
+ de = (struct ocfs2_dir_entry *) de_buf;
+
+ if (de_buf + namelen <= dlimit &&
+ ocfs2_match(namelen, name, de)) {
+ /* found a match - just to be sure, do a full check */
+ if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+ ret = -1;
+ goto bail;
+ }
+ *res_dir = de;
+ ret = 1;
+ goto bail;
+ }
+
+ /* prevent looping on a bad block */
+ de_len = le16_to_cpu(de->rec_len);
+ if (de_len <= 0) {
+ ret = -1;
+ goto bail;
+ }
+
+ de_buf += de_len;
+ offset += de_len;
+ }
+
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+static struct buffer_head *ocfs2_find_entry_id(const char *name,
+ int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ int ret, found;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+
+ ret = ocfs2_read_block(OCFS2_SB(dir->i_sb), OCFS2_I(dir)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, dir);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
+ data->id_data, i_size_read(dir), res_dir);
+ if (found == 1)
+ return di_bh;
+
+ brelse(di_bh);
+out:
+ return NULL;
+}
+
+struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct super_block *sb;
+ struct buffer_head *bh_use[NAMEI_RA_SIZE];
+ struct buffer_head *bh, *ret = NULL;
+ unsigned long start, block, b;
+ int ra_max = 0; /* Number of bh's in the readahead
+ buffer, bh_use[] */
+ int ra_ptr = 0; /* Current index into readahead
+ buffer */
+ int num = 0;
+ int nblocks, i, err;
+
+ mlog_entry_void();
+
+ sb = dir->i_sb;
+
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ start = OCFS2_I(dir)->ip_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+
+restart:
+ do {
+ /*
+ * We deal with the read-ahead logic here.
+ */
+ if (ra_ptr >= ra_max) {
+ /* Refill the readahead buffer */
+ ra_ptr = 0;
+ b = block;
+ for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
+ /*
+ * Terminate if we reach the end of the
+ * directory and must wrap, or if our
+ * search has finished at this block.
+ */
+ if (b >= nblocks || (num && block == start)) {
+ bh_use[ra_max] = NULL;
+ break;
+ }
+ num++;
+
+ bh = ocfs2_bread(dir, b++, &err, 1);
+ bh_use[ra_max] = bh;
+ }
+ }
+ if ((bh = bh_use[ra_ptr++]) == NULL)
+ goto next;
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ /* read error, skip block & hope for the best */
+ ocfs2_error(dir->i_sb, "reading directory %llu, "
+ "offset %lu\n",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ block);
+ brelse(bh);
+ goto next;
+ }
+ i = ocfs2_search_dirblock(bh, dir, name, namelen,
+ block << sb->s_blocksize_bits,
+ bh->b_data, sb->s_blocksize,
+ res_dir);
+ if (i == 1) {
+ OCFS2_I(dir)->ip_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+ brelse(bh);
+ if (i < 0)
+ goto cleanup_and_exit;
+ }
+ next:
+ if (++block >= nblocks)
+ block = 0;
+ } while (block != start);
+
+ /*
+ * If the directory has grown while we were searching, then
+ * search the last part of the directory before giving up.
+ */
+ block = nblocks;
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ if (block < nblocks) {
+ start = 0;
+ goto restart;
+ }
+
+cleanup_and_exit:
+ /* Clean up the read-ahead blocks */
+ for (; ra_ptr < ra_max; ra_ptr++)
+ brelse(bh_use[ra_ptr]);
+
+ mlog_exit_ptr(ret);
+ return ret;
+}
+
+/*
+ * Try to find an entry of the provided name within 'dir'.
*
+ * If nothing was found, NULL is returned. Otherwise, a buffer_head
+ * and pointer to the dir entry are passed back.
+ *
+ * Caller can NOT assume anything about the contents of the
+ * buffer_head - it is passed back only so that it can be passed into
+ * any one of the manipulation functions (add entry, delete entry,
+ * etc). As an example, bh in the extent directory case is a data
+ * block, in the inline-data case it actually points to an inode.
*/
-int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+struct buffer_head *ocfs2_find_entry(const char *name, int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ *res_dir = NULL;
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_find_entry_id(name, namelen, dir, res_dir);
+
+ return ocfs2_find_entry_el(name, namelen, dir, res_dir);
+}
+
+/*
+ * Update inode number and type of a previously found directory entry.
+ */
+int ocfs2_update_entry(struct inode *dir, handle_t *handle,
+ struct buffer_head *de_bh, struct ocfs2_dir_entry *de,
+ struct inode *new_entry_inode)
+{
+ int ret;
+
+ /*
+ * The same code works fine for both inline-data and extent
+ * based directories, so no need to split this up.
+ */
+
+ ret = ocfs2_journal_access(handle, dir, de_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
+ ocfs2_set_de_type(de, new_entry_inode->i_mode);
+
+ ocfs2_journal_dirty(handle, de_bh);
+
+out:
+ return ret;
+}
+
+static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh, char *first_de,
+ unsigned int bytes)
+{
+ struct ocfs2_dir_entry *de, *pde;
+ int i, status = -ENOENT;
+
+ mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
+
+ i = 0;
+ pde = NULL;
+ de = (struct ocfs2_dir_entry *) first_de;
+ while (i < bytes) {
+ if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (de == de_del) {
+ status = ocfs2_journal_access(handle, dir, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (pde)
+ pde->rec_len =
+ cpu_to_le16(le16_to_cpu(pde->rec_len) +
+ le16_to_cpu(de->rec_len));
+ else
+ de->inode = 0;
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, bh);
+ goto bail;
+ }
+ i += le16_to_cpu(de->rec_len);
+ pde = de;
+ de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
+ }
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static inline int ocfs2_delete_entry_id(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+
+ ret = ocfs2_read_block(OCFS2_SB(dir->i_sb), OCFS2_I(dir)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, dir);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
+ i_size_read(dir));
+
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+static inline int ocfs2_delete_entry_el(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
+ bh->b_size);
+}
+
+/*
+ * ocfs2_delete_entry deletes a directory entry by merging it with the
+ * previous entry
+ */
+int ocfs2_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_delete_entry_id(handle, dir, de_del, bh);
+
+ return ocfs2_delete_entry_el(handle, dir, de_del, bh);
+}
+
+/*
+ * Check whether 'de' has enough room to hold an entry of
+ * 'new_rec_len' bytes.
+ */
+static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
+ unsigned int new_rec_len)
+{
+ unsigned int de_really_used;
+
+ /* Check whether this is an empty record with enough space */
+ if (le64_to_cpu(de->inode) == 0 &&
+ le16_to_cpu(de->rec_len) >= new_rec_len)
+ return 1;
+
+ /*
+ * Record might have free space at the end which we can
+ * use.
+ */
+ de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
+ if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
+ return 1;
+
+ return 0;
+}
+
+/* we don't always have a dentry for what we want to add, so people
+ * like orphan dir can call this instead.
+ *
+ * If you pass me insert_bh, I'll skip the search of the other dir
+ * blocks and put the record in there.
+ */
+int __ocfs2_add_entry(handle_t *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ unsigned long offset;
+ unsigned short rec_len;
+ struct ocfs2_dir_entry *de, *de1;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
+ struct super_block *sb = dir->i_sb;
+ int retval, status;
+ unsigned int size = sb->s_blocksize;
+ char *data_start = insert_bh->b_data;
+
+ mlog_entry_void();
+
+ if (!namelen)
+ return -EINVAL;
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ data_start = di->id2.i_data.id_data;
+ size = i_size_read(dir);
+
+ BUG_ON(insert_bh != parent_fe_bh);
+ }
+
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
+ offset = 0;
+ de = (struct ocfs2_dir_entry *) data_start;
+ while (1) {
+ BUG_ON((char *)de >= (size + data_start));
+
+ /* These checks should've already been passed by the
+ * prepare function, but I guess we can leave them
+ * here anyway. */
+ if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
+ retval = -ENOENT;
+ goto bail;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ retval = -EEXIST;
+ goto bail;
+ }
+
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
+ if (retval < 0) {
+ mlog_errno(retval);
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, dir, insert_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ /* By now the buffer is marked for journaling */
+ offset += le16_to_cpu(de->rec_len);
+ if (le64_to_cpu(de->inode)) {
+ de1 = (struct ocfs2_dir_entry *)((char *) de +
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de1->rec_len =
+ cpu_to_le16(le16_to_cpu(de->rec_len) -
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ de = de1;
+ }
+ de->file_type = OCFS2_FT_UNKNOWN;
+ if (blkno) {
+ de->inode = cpu_to_le64(blkno);
+ ocfs2_set_de_type(de, inode->i_mode);
+ } else
+ de->inode = 0;
+ de->name_len = namelen;
+ memcpy(de->name, name, namelen);
+
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, insert_bh);
+ retval = 0;
+ goto bail;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
+ }
+
+ /* when you think about it, the assert above should prevent us
+ * from ever getting here. */
+ retval = -ENOSPC;
+bail:
+
+ mlog_exit(retval);
+ return retval;
+}
+
+static int ocfs2_dir_foreach_blk_id(struct inode *inode,
+ unsigned long *f_version,
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
+{
+ int ret, i, filldir_ret;
+ unsigned long offset = *f_pos;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+ struct ocfs2_dir_entry *de;
+
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), OCFS2_I(inode)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ while (*f_pos < i_size_read(inode)) {
+revalidate:
+ /* If the dir block has changed since the last call to
+ * readdir(2), then we might be pointing to an invalid
+ * dirent right now. Scan from the start of the block
+ * to make sure. */
+ if (*f_version != inode->i_version) {
+ for (i = 0; i < i_size_read(inode) && i < offset; ) {
+ de = (struct ocfs2_dir_entry *)
+ (data->id_data + i);
+ /* It's too expensive to do a full
+ * dirent test each time round this
+ * loop, but we do have to test at
+ * least that it is non-zero. A
+ * failure will be detected in the
+ * dirent test below. */
+ if (le16_to_cpu(de->rec_len) <
+ OCFS2_DIR_REC_LEN(1))
+ break;
+ i += le16_to_cpu(de->rec_len);
+ }
+ *f_pos = offset = i;
+ *f_version = inode->i_version;
+ }
+
+ de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos);
+ if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) {
+ /* On error, skip the f_pos to the end. */
+ *f_pos = i_size_read(inode);
+ goto out;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ if (le64_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ unsigned long version = *f_version;
+ unsigned char d_type = DT_UNKNOWN;
+
+ if (de->file_type < OCFS2_FT_MAX)
+ d_type = ocfs2_filetype_table[de->file_type];
+
+ filldir_ret = filldir(priv, de->name,
+ de->name_len,
+ *f_pos,
+ le64_to_cpu(de->inode),
+ d_type);
+ if (filldir_ret) {
+ if (filldir_err)
+ *filldir_err = filldir_ret;
+ break;
+ }
+ if (version != *f_version)
+ goto revalidate;
+ }
+ *f_pos += le16_to_cpu(de->rec_len);
+ }
+
+out:
+ brelse(di_bh);
+
+ return 0;
+}
+
+static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ unsigned long *f_version,
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
{
int error = 0;
unsigned long offset, blk, last_ra_blk = 0;
@@ -79,45 +687,23 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct buffer_head * bh, * tmp;
struct ocfs2_dir_entry * de;
int err;
- struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block * sb = inode->i_sb;
unsigned int ra_sectors = 16;
- int lock_level = 0;
-
- mlog_entry("dirino=%llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
stored = 0;
bh = NULL;
- error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
- if (lock_level && error >= 0) {
- /* We release EX lock which used to update atime
- * and get PR lock again to reduce contention
- * on commonly accessed directories. */
- ocfs2_meta_unlock(inode, 1);
- lock_level = 0;
- error = ocfs2_meta_lock(inode, NULL, 0);
- }
- if (error < 0) {
- if (error != -ENOENT)
- mlog_errno(error);
- /* we haven't got any yet, so propagate the error. */
- stored = error;
- goto bail_nolock;
- }
+ offset = (*f_pos) & (sb->s_blocksize - 1);
- offset = filp->f_pos & (sb->s_blocksize - 1);
-
- while (!error && !stored && filp->f_pos < i_size_read(inode)) {
- blk = (filp->f_pos) >> sb->s_blocksize_bits;
+ while (!error && !stored && *f_pos < i_size_read(inode)) {
+ blk = (*f_pos) >> sb->s_blocksize_bits;
bh = ocfs2_bread(inode, blk, &err, 0);
if (!bh) {
mlog(ML_ERROR,
"directory #%llu contains a hole at offset %lld\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
- filp->f_pos);
- filp->f_pos += sb->s_blocksize - offset;
+ *f_pos);
+ *f_pos += sb->s_blocksize - offset;
continue;
}
@@ -143,7 +729,7 @@ revalidate:
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (filp->f_version != inode->i_version) {
+ if (*f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ocfs2_dir_entry *) (bh->b_data + i);
/* It's too expensive to do a full
@@ -158,21 +744,20 @@ revalidate:
i += le16_to_cpu(de->rec_len);
}
offset = i;
- filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+ *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1))
| offset;
- filp->f_version = inode->i_version;
+ *f_version = inode->i_version;
}
- while (!error && filp->f_pos < i_size_read(inode)
+ while (!error && *f_pos < i_size_read(inode)
&& offset < sb->s_blocksize) {
de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
/* On error, skip the f_pos to the
next block. */
- filp->f_pos = (filp->f_pos |
- (sb->s_blocksize - 1)) + 1;
+ *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1;
brelse(bh);
- goto bail;
+ goto out;
}
offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
@@ -183,36 +768,109 @@ revalidate:
* not the directory has been modified
* during the copy operation.
*/
- unsigned long version = filp->f_version;
+ unsigned long version = *f_version;
unsigned char d_type = DT_UNKNOWN;
if (de->file_type < OCFS2_FT_MAX)
d_type = ocfs2_filetype_table[de->file_type];
- error = filldir(dirent, de->name,
+ error = filldir(priv, de->name,
de->name_len,
- filp->f_pos,
- ino_from_blkno(sb, le64_to_cpu(de->inode)),
+ *f_pos,
+ le64_to_cpu(de->inode),
d_type);
- if (error)
+ if (error) {
+ if (filldir_err)
+ *filldir_err = error;
break;
- if (version != filp->f_version)
+ }
+ if (version != *f_version)
goto revalidate;
stored ++;
}
- filp->f_pos += le16_to_cpu(de->rec_len);
+ *f_pos += le16_to_cpu(de->rec_len);
}
offset = 0;
brelse(bh);
}
stored = 0;
-bail:
+out:
+ return stored;
+}
+
+static int ocfs2_dir_foreach_blk(struct inode *inode, unsigned long *f_version,
+ loff_t *f_pos, void *priv, filldir_t filldir,
+ int *filldir_err)
+{
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv,
+ filldir, filldir_err);
+
+ return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir,
+ filldir_err);
+}
+
+/*
+ * This is intended to be called from inside other kernel functions,
+ * so we fake some arguments.
+ */
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir)
+{
+ int ret = 0, filldir_err = 0;
+ unsigned long version = inode->i_version;
+
+ while (*f_pos < i_size_read(inode)) {
+ ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv,
+ filldir, &filldir_err);
+ if (ret || filldir_err)
+ break;
+ }
+
+ if (ret > 0)
+ ret = -EIO;
+
+ return 0;
+}
+
+/*
+ * ocfs2_readdir()
+ *
+ */
+int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+ int error = 0;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int lock_level = 0;
+
+ mlog_entry("dirino=%llu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+ if (lock_level && error >= 0) {
+ /* We release EX lock which used to update atime
+ * and get PR lock again to reduce contention
+ * on commonly accessed directories. */
+ ocfs2_meta_unlock(inode, 1);
+ lock_level = 0;
+ error = ocfs2_meta_lock(inode, NULL, 0);
+ }
+ if (error < 0) {
+ if (error != -ENOENT)
+ mlog_errno(error);
+ /* we haven't got any yet, so propagate the error. */
+ goto bail_nolock;
+ }
+
+ error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos,
+ dirent, filldir, NULL);
+
ocfs2_meta_unlock(inode, lock_level);
bail_nolock:
- mlog_exit(stored);
+ mlog_exit(error);
- return stored;
+ return error;
}
/*
@@ -252,6 +910,23 @@ leave:
return status;
}
+/*
+ * Convenience function for callers which just want the block number
+ * mapped to a name and don't require the full dirent info, etc.
+ */
+int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
+ int namelen, u64 *blkno)
+{
+ int ret;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_dir_entry *dirent = NULL;
+
+ ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &bh, &dirent);
+ brelse(bh);
+
+ return ret;
+}
+
/* Check for a name within a directory.
*
* Return 0 if the name does not exist
@@ -284,77 +959,414 @@ bail:
return ret;
}
+struct ocfs2_empty_dir_priv {
+ unsigned seen_dot;
+ unsigned seen_dot_dot;
+ unsigned seen_other;
+};
+static int ocfs2_empty_dir_filldir(void *priv, const char *name, int name_len,
+ loff_t pos, u64 ino, unsigned type)
+{
+ struct ocfs2_empty_dir_priv *p = priv;
+
+ /*
+ * Check the positions of "." and ".." records to be sure
+ * they're in the correct place.
+ */
+ if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
+ p->seen_dot = 1;
+ return 0;
+ }
+
+ if (name_len == 2 && !strncmp("..", name, 2) &&
+ pos == OCFS2_DIR_REC_LEN(1)) {
+ p->seen_dot_dot = 1;
+ return 0;
+ }
+
+ p->seen_other = 1;
+ return 1;
+}
/*
* routine to check that the specified directory is empty (for rmdir)
+ *
+ * Returns 1 if dir is empty, zero otherwise.
*/
int ocfs2_empty_dir(struct inode *inode)
{
- unsigned long offset;
- struct buffer_head * bh;
- struct ocfs2_dir_entry * de, * de1;
- struct super_block * sb;
- int err;
+ int ret;
+ loff_t start = 0;
+ struct ocfs2_empty_dir_priv priv;
+
+ memset(&priv, 0, sizeof(priv));
+
+ ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir);
+ if (ret)
+ mlog_errno(ret);
- sb = inode->i_sb;
- if ((i_size_read(inode) <
- (OCFS2_DIR_REC_LEN(1) + OCFS2_DIR_REC_LEN(2))) ||
- !(bh = ocfs2_bread(inode, 0, &err, 0))) {
- mlog(ML_ERROR, "bad directory (dir #%llu) - no data block\n",
+ if (!priv.seen_dot || !priv.seen_dot_dot) {
+ mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
+ /*
+ * XXX: Is it really safe to allow an unlink to continue?
+ */
return 1;
}
- de = (struct ocfs2_dir_entry *) bh->b_data;
- de1 = (struct ocfs2_dir_entry *)
- ((char *)de + le16_to_cpu(de->rec_len));
- if ((le64_to_cpu(de->inode) != OCFS2_I(inode)->ip_blkno) ||
- !le64_to_cpu(de1->inode) ||
- strcmp(".", de->name) ||
- strcmp("..", de1->name)) {
- mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
- brelse(bh);
- return 1;
+ return !priv.seen_other;
+}
+
+static void ocfs2_fill_initial_dirents(struct inode *inode,
+ struct inode *parent,
+ char *start, unsigned int size)
+{
+ struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
+
+ de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
+ de->name_len = 1;
+ de->rec_len =
+ cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ strcpy(de->name, ".");
+ ocfs2_set_de_type(de, S_IFDIR);
+
+ de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
+ de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
+ de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
+ de->name_len = 2;
+ strcpy(de->name, "..");
+ ocfs2_set_de_type(de, S_IFDIR);
+}
+
+/*
+ * This works together with code in ocfs2_mknod_locked() which sets
+ * the inline-data flag and initializes the inline-data section.
+ */
+static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inline_data *data = &di->id2.i_data;
+ unsigned int size = le16_to_cpu(data->id_count);
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
- offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
- de = (struct ocfs2_dir_entry *)((char *)de1 + le16_to_cpu(de1->rec_len));
- while (offset < i_size_read(inode) ) {
- if (!bh || (void *)de >= (void *)(bh->b_data + sb->s_blocksize)) {
- brelse(bh);
- bh = ocfs2_bread(inode,
- offset >> sb->s_blocksize_bits, &err, 0);
- if (!bh) {
- mlog(ML_ERROR, "dir %llu has a hole at %lu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, offset);
- offset += sb->s_blocksize;
- continue;
- }
- de = (struct ocfs2_dir_entry *) bh->b_data;
- }
- if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
- brelse(bh);
- return 1;
+
+ ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
+
+ ocfs2_journal_dirty(handle, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ i_size_write(inode, size);
+ inode->i_nlink = 2;
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+
+ ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
+static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac)
+{
+ int status;
+ struct buffer_head *new_bh = NULL;
+
+ mlog_entry_void();
+
+ status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
+ data_ac, NULL, &new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_set_new_buffer_uptodate(inode, new_bh);
+
+ status = ocfs2_journal_access(handle, inode, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ memset(new_bh->b_data, 0, osb->sb->s_blocksize);
+
+ ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data,
+ osb->sb->s_blocksize);
+
+ status = ocfs2_journal_dirty(handle, new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ inode->i_nlink = 2;
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ if (new_bh)
+ brelse(new_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac)
+{
+ BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
+
+ return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
+ data_ac);
+}
+
+static void ocfs2_expand_last_dirent(char *start, unsigned int old_size,
+ unsigned int new_size)
+{
+ struct ocfs2_dir_entry *de;
+ struct ocfs2_dir_entry *prev_de;
+ char *de_buf, *limit;
+ unsigned int bytes = new_size - old_size;
+
+ limit = start + old_size;
+ de_buf = start;
+ de = (struct ocfs2_dir_entry *)de_buf;
+ do {
+ prev_de = de;
+ de_buf += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *)de_buf;
+ } while (de_buf < limit);
+
+ le16_add_cpu(&prev_de->rec_len, bytes);
+}
+
+/*
+ * We allocate enough clusters to fulfill "blocks_wanted", but set
+ * i_size to exactly one block. Ocfs2_extend_dir() will handle the
+ * rest automatically for us.
+ *
+ * *first_block_bh is a pointer to the 1st data block allocated to the
+ * directory.
+ */
+static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
+ unsigned int blocks_wanted,
+ struct buffer_head **first_block_bh)
+{
+ int ret, credits = OCFS2_INLINE_TO_EXTENTS_CREDITS;
+ u32 alloc, bit_off, len;
+ struct super_block *sb = dir->i_sb;
+ u64 blkno, bytes = blocks_wanted << sb->s_blocksize_bits;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct ocfs2_inode_info *oi = OCFS2_I(dir);
+ struct ocfs2_alloc_context *data_ac;
+ struct buffer_head *dirdata_bh = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ handle_t *handle;
+
+ alloc = ocfs2_clusters_for_bytes(sb, bytes);
+
+ /*
+ * We should never need more than 2 clusters for this -
+ * maximum dirent size is far less than one block. In fact,
+ * the only time we'd need more than one cluster is if
+ * blocksize == clustersize and the dirent won't fit in the
+ * extra space that the expansion to a single block gives. As
+ * of today, that only happens on 4k/4k file systems.
+ */
+ BUG_ON(alloc > 2);
+
+ ret = ocfs2_reserve_clusters(osb, alloc, &data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_write(&oi->ip_alloc_sem);
+
+ /*
+	 * Prepare for worst case allocation scenario of two separate
+ * extents.
+ */
+ if (alloc == 2)
+ credits += OCFS2_SUBALLOC_ALLOC;
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_sem;
+ }
+
+ /*
+ * Try to claim as many clusters as the bitmap can give though
+ * if we only get one now, that's enough to continue. The rest
+ * will be claimed after the conversion to extents.
+ */
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Operations are carefully ordered so that we set up the new
+ * data block first. The conversion from inline data to
+ * extents follows.
+ */
+ blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
+ dirdata_bh = sb_getblk(sb, blkno);
+ if (!dirdata_bh) {
+ ret = -EIO;
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ocfs2_set_new_buffer_uptodate(dir, dirdata_bh);
+
+ ret = ocfs2_journal_access(handle, dir, dirdata_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
+ memset(dirdata_bh->b_data + i_size_read(dir), 0,
+ sb->s_blocksize - i_size_read(dir));
+ ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir),
+ sb->s_blocksize);
+
+ ret = ocfs2_journal_dirty(handle, dirdata_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Set extent, i_size, etc on the directory. After this, the
+ * inode should contain the same exact dirents as before and
+ * be fully accessible from system calls.
+ *
+ * We let the later dirent insert modify c/mtime - to the user
+ * the data hasn't changed.
+ */
+ ret = ocfs2_journal_access(handle, dir, di_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ocfs2_dinode_new_extent_list(dir, di);
+
+ i_size_write(dir, sb->s_blocksize);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+
+ di->i_size = cpu_to_le64(sb->s_blocksize);
+ di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
+ dir->i_blocks = ocfs2_inode_sector_count(dir);
+
+ /*
+ * This should never fail as our extent list is empty and all
+ * related blocks have been journaled already.
+ */
+ ret = ocfs2_insert_extent(osb, handle, dir, di_bh, 0, blkno, len, 0,
+ NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_dirty(handle, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * We asked for two clusters, but only got one in the 1st
+ * pass. Claim the 2nd cluster as a separate extent.
+ */
+ if (alloc > len) {
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ &len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
}
- if (le64_to_cpu(de->inode)) {
- brelse(bh);
- return 0;
+ blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
+
+ ret = ocfs2_insert_extent(osb, handle, dir, di_bh, 1, blkno,
+ len, 0, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
- offset += le16_to_cpu(de->rec_len);
- de = (struct ocfs2_dir_entry *)
- ((char *)de + le16_to_cpu(de->rec_len));
}
- brelse(bh);
- return 1;
+
+ *first_block_bh = dirdata_bh;
+ dirdata_bh = NULL;
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out_sem:
+ up_write(&oi->ip_alloc_sem);
+
+out:
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+
+ brelse(dirdata_bh);
+
+ return ret;
}
/* returns a bh of the 1st new block in the allocation. */
-int ocfs2_do_extend_dir(struct super_block *sb,
- handle_t *handle,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- struct ocfs2_alloc_context *data_ac,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **new_bh)
+static int ocfs2_do_extend_dir(struct super_block *sb,
+ handle_t *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh)
{
int status;
int extend;
@@ -396,10 +1408,18 @@ bail:
return status;
}
-/* assumes you already have a cluster lock on the directory. */
+/*
+ * Assumes you already have a cluster lock on the directory.
+ *
+ * 'blocks_wanted' is only used if we have an inline directory which
+ * is to be turned into an extent based one. The size of the dirent to
+ * insert might be larger than the space gained by growing to just one
+ * block, so we may have to grow the inode by two blocks in that case.
+ */
static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
+ unsigned int blocks_wanted,
struct buffer_head **new_de_bh)
{
int status = 0;
@@ -415,6 +1435,38 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
mlog_entry_void();
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
+ blocks_wanted, &new_bh);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (blocks_wanted == 1) {
+ /*
+ * If the new dirent will fit inside the space
+ * created by pushing out to one block, then
+ * we can complete the operation
+ * here. Otherwise we have to expand i_size
+ * and format the 2nd block below.
+ */
+ BUG_ON(new_bh == NULL);
+ goto bail_bh;
+ }
+
+ /*
+ * Get rid of 'new_bh' - we want to format the 2nd
+ * data block and return that instead.
+ */
+ brelse(new_bh);
+ new_bh = NULL;
+
+ dir_i_size = i_size_read(dir);
+ credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
+ goto do_extend;
+ }
+
dir_i_size = i_size_read(dir);
mlog(0, "extending dir %llu (i_size = %lld)\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
@@ -452,6 +1504,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
}
+do_extend:
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
@@ -497,6 +1550,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
goto bail;
}
+bail_bh:
*new_de_bh = new_bh;
get_bh(*new_de_bh);
bail:
@@ -517,41 +1571,71 @@ bail:
return status;
}
-/*
- * Search the dir for a good spot, extending it if necessary. The
- * block containing an appropriate record is returned in ret_de_bh.
- */
-int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- const char *name,
- int namelen,
- struct buffer_head **ret_de_bh)
+static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ const char *name, int namelen,
+ struct buffer_head **ret_de_bh,
+ unsigned int *blocks_wanted)
{
- unsigned long offset;
- struct buffer_head * bh = NULL;
- unsigned short rec_len;
- struct ocfs2_dinode *fe;
- struct ocfs2_dir_entry *de;
- struct super_block *sb;
- int status;
+ int ret;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_dir_entry *de, *last_de = NULL;
+ char *de_buf, *limit;
+ unsigned long offset = 0;
+ unsigned int rec_len, new_rec_len;
+
+ de_buf = di->id2.i_data.id_data;
+ limit = de_buf + i_size_read(dir);
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
- mlog_entry_void();
+ while (de_buf < limit) {
+ de = (struct ocfs2_dir_entry *)de_buf;
- mlog(0, "getting ready to insert namelen %d into dir %llu\n",
- namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ ret = -EEXIST;
+ goto out;
+ }
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
+ /* Ok, we found a spot. Return this bh and let
+ * the caller actually fill it in. */
+ *ret_de_bh = di_bh;
+ get_bh(*ret_de_bh);
+ ret = 0;
+ goto out;
+ }
- BUG_ON(!S_ISDIR(dir->i_mode));
- fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
- BUG_ON(le64_to_cpu(fe->i_size) != i_size_read(dir));
+ last_de = de;
+ de_buf += le16_to_cpu(de->rec_len);
+ offset += le16_to_cpu(de->rec_len);
+ }
- sb = dir->i_sb;
+ /*
+ * We're going to require expansion of the directory - figure
+ * out how many blocks we'll need so that a place for the
+ * dirent can be found.
+ */
+ *blocks_wanted = 1;
+ new_rec_len = le16_to_cpu(last_de->rec_len) + (dir->i_sb->s_blocksize - i_size_read(dir));
+ if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
+ *blocks_wanted = 2;
+
+ ret = -ENOSPC;
+out:
+ return ret;
+}
- if (!namelen) {
- status = -EINVAL;
- mlog_errno(status);
- goto bail;
- }
+static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
+ int namelen, struct buffer_head **ret_de_bh)
+{
+ unsigned long offset;
+ struct buffer_head *bh = NULL;
+ unsigned short rec_len;
+ struct ocfs2_dir_entry *de;
+ struct super_block *sb = dir->i_sb;
+ int status;
bh = ocfs2_bread(dir, 0, &status, 0);
if (!bh) {
@@ -568,17 +1652,11 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
bh = NULL;
if (i_size_read(dir) <= offset) {
- status = ocfs2_extend_dir(osb,
- dir,
- parent_fe_bh,
- &bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- BUG_ON(!bh);
- *ret_de_bh = bh;
- get_bh(*ret_de_bh);
+ /*
+ * Caller will have to expand this
+ * directory.
+ */
+ status = -ENOSPC;
goto bail;
}
bh = ocfs2_bread(dir,
@@ -600,10 +1678,7 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
status = -EEXIST;
goto bail;
}
- if (((le64_to_cpu(de->inode) == 0) &&
- (le16_to_cpu(de->rec_len) >= rec_len)) ||
- (le16_to_cpu(de->rec_len) >=
- (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
/* Ok, we found a spot. Return this bh and let
* the caller actually fill it in. */
*ret_de_bh = bh;
@@ -623,3 +1698,61 @@ bail:
mlog_exit(status);
return status;
}
+
+int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ const char *name,
+ int namelen,
+ struct buffer_head **ret_de_bh)
+{
+ int ret;
+ unsigned int blocks_wanted = 1;
+ struct buffer_head *bh = NULL;
+
+ mlog(0, "getting ready to insert namelen %d into dir %llu\n",
+ namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+
+ *ret_de_bh = NULL;
+
+ if (!namelen) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
+ namelen, &bh, &blocks_wanted);
+ } else
+ ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
+
+ if (ret && ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (ret == -ENOSPC) {
+ /*
+ * We have to expand the directory to add this name.
+ */
+ BUG_ON(bh);
+
+ ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
+ &bh);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(!bh);
+ }
+
+ *ret_de_bh = bh;
+ bh = NULL;
+out:
+ if (bh)
+ brelse(bh);
+ return ret;
+}
diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h
index 3f67e146864..ce48b9080d8 100644
--- a/fs/ocfs2/dir.h
+++ b/fs/ocfs2/dir.h
@@ -26,17 +26,49 @@
#ifndef OCFS2_DIR_H
#define OCFS2_DIR_H
+struct buffer_head *ocfs2_find_entry(const char *name,
+ int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir);
+int ocfs2_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh);
+int __ocfs2_add_entry(handle_t *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh);
+static inline int ocfs2_add_entry(handle_t *handle,
+ struct dentry *dentry,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
+ dentry->d_name.name, dentry->d_name.len,
+ inode, blkno, parent_fe_bh, insert_bh);
+}
+int ocfs2_update_entry(struct inode *dir, handle_t *handle,
+ struct buffer_head *de_bh, struct ocfs2_dir_entry *de,
+ struct inode *new_entry_inode);
+
int ocfs2_check_dir_for_entry(struct inode *dir,
const char *name,
int namelen);
-int ocfs2_empty_dir(struct inode *inode); /* FIXME: to namei.c */
+int ocfs2_empty_dir(struct inode *inode);
int ocfs2_find_files_on_disk(const char *name,
int namelen,
u64 *blkno,
struct inode *inode,
struct buffer_head **dirent_bh,
struct ocfs2_dir_entry **dirent);
+int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
+ int namelen, u64 *blkno);
int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir);
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir);
int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
@@ -44,11 +76,11 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
int namelen,
struct buffer_head **ret_de_bh);
struct ocfs2_alloc_context;
-int ocfs2_do_extend_dir(struct super_block *sb,
- handle_t *handle,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- struct ocfs2_alloc_context *data_ac,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **new_bh);
+int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac);
+
#endif /* OCFS2_DIR_H */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f71250ed166..41c76ff2fcf 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1482,6 +1482,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
+ lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
out:
@@ -1515,6 +1516,7 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
+ oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
ocfs2_set_inode_flags(inode);
/* fast-symlinks are a special case */
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 492bad32a8c..87a785e4120 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -29,12 +29,12 @@
#include "dcache.h"
-#define OCFS2_LVB_VERSION 4
+#define OCFS2_LVB_VERSION 5
struct ocfs2_meta_lvb {
__u8 lvb_version;
__u8 lvb_reserved0;
- __be16 lvb_reserved1;
+ __be16 lvb_idynfeatures;
__be32 lvb_iclusters;
__be32 lvb_iuid;
__be32 lvb_igid;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index bc48177bd18..c3bbc198f9c 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -88,8 +88,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
struct dentry *parent;
struct inode *inode;
struct inode *dir = child->d_inode;
- struct buffer_head *dirent_bh = NULL;
- struct ocfs2_dir_entry *dirent;
mlog_entry("(0x%p, '%.*s')\n", child,
child->d_name.len, child->d_name.name);
@@ -105,8 +103,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
goto bail;
}
- status = ocfs2_find_files_on_disk("..", 2, &blkno, dir, &dirent_bh,
- &dirent);
+ status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
if (status < 0) {
parent = ERR_PTR(-ENOENT);
goto bail_unlock;
@@ -131,9 +128,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
bail_unlock:
ocfs2_meta_unlock(dir, 0);
- if (dirent_bh)
- brelse(dirent_bh);
-
bail:
mlog_exit_ptr(parent);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 03c1d365c78..c58668a326f 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -387,6 +387,12 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
struct ocfs2_extent_rec *rec;
u32 coff;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = -ERANGE;
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
num_clusters, extent_flags);
if (ret == 0)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f3bc3658e7a..a62b14eb406 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -397,6 +397,15 @@ static int ocfs2_truncate_file(struct inode *inode,
unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
truncate_inode_pages(inode->i_mapping, new_i_size);
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
+ i_size_read(inode), 0);
+ if (status)
+ mlog_errno(status);
+
+ goto bail_unlock_data;
+ }
+
/* alright, we're going to need to do a full blown alloc size
* change. Orphan the inode so that recovery can complete the
* truncate if necessary. This does the task of marking
@@ -779,25 +788,6 @@ leave:
return status;
}
-static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
- u32 clusters_to_add, int mark_unwritten)
-{
- int ret;
-
- /*
-	 * The alloc sem blocks people in read/write from reading our
- * allocation until we're done changing it. We depend on
- * i_mutex to block other extend/truncate calls while we're
- * here.
- */
- down_write(&OCFS2_I(inode)->ip_alloc_sem);
- ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
- mark_unwritten);
- up_write(&OCFS2_I(inode)->ip_alloc_sem);
-
- return ret;
-}
-
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->prepare_write() and
@@ -889,25 +879,48 @@ out:
return ret;
}
-/*
- * A tail_to_skip value > 0 indicates that we're being called from
- * ocfs2_file_aio_write(). This has the following implications:
- *
- * - we don't want to update i_size
- * - di_bh will be NULL, which is fine because it's only used in the
- * case where we want to update i_size.
- * - ocfs2_zero_extend() will then only be filling the hole created
- * between i_size and the start of the write.
- */
+int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+{
+ int ret;
+ u32 clusters_to_add;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
+ if (clusters_to_add < oi->ip_clusters)
+ clusters_to_add = 0;
+ else
+ clusters_to_add -= oi->ip_clusters;
+
+ if (clusters_to_add) {
+ ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
+ clusters_to_add, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Call this even if we don't add any clusters to the tree. We
+ * still need to zero the area between the old i_size and the
+ * new i_size.
+ */
+ ret = ocfs2_zero_extend(inode, zero_to);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
- u64 new_i_size,
- size_t tail_to_skip)
+ u64 new_i_size)
{
- int ret = 0;
- u32 clusters_to_add = 0;
+ int ret = 0, data_locked = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
- BUG_ON(!tail_to_skip && !di_bh);
+ BUG_ON(!di_bh);
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
@@ -917,13 +930,18 @@ static int ocfs2_extend_file(struct inode *inode,
goto out;
BUG_ON(new_i_size < i_size_read(inode));
- if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
- BUG_ON(tail_to_skip != 0);
+ /*
+ * Fall through for converting inline data, even if the fs
+ * supports sparse files.
+ *
+ * The check for inline data here is legal - nobody can add
+ * the feature since we have i_mutex. We must check it again
+ * after acquiring ip_alloc_sem though, as paths like mmap
+ * might have raced us to converting the inode to extents.
+ */
+ if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
goto out_update_size;
- }
-
- clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
- OCFS2_I(inode)->ip_clusters;
/*
* protect the pages that ocfs2_zero_extend is going to be
@@ -937,39 +955,52 @@ static int ocfs2_extend_file(struct inode *inode,
mlog_errno(ret);
goto out;
}
+ data_locked = 1;
+
+ /*
+ * The alloc sem blocks people in read/write from reading our
+ * allocation until we're done changing it. We depend on
+ * i_mutex to block other extend/truncate calls while we're
+ * here.
+ */
+ down_write(&oi->ip_alloc_sem);
+
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ /*
+ * We can optimize small extends by keeping the inodes
+ * inline data.
+ */
+ if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
+ up_write(&oi->ip_alloc_sem);
+ goto out_update_size;
+ }
+
+ ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
+ if (ret) {
+ up_write(&oi->ip_alloc_sem);
- if (clusters_to_add) {
- ret = ocfs2_extend_allocation(inode,
- OCFS2_I(inode)->ip_clusters,
- clusters_to_add, 0);
- if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
}
- /*
- * Call this even if we don't add any clusters to the tree. We
- * still need to zero the area between the old i_size and the
- * new i_size.
- */
- ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+ if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+
+ up_write(&oi->ip_alloc_sem);
+
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
out_update_size:
- if (!tail_to_skip) {
- /* We're being called from ocfs2_setattr() which wants
- * us to update i_size */
- ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
- if (ret < 0)
- mlog_errno(ret);
- }
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
out_unlock:
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ if (data_locked)
ocfs2_data_unlock(inode, 1);
out:
@@ -1035,7 +1066,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (i_size_read(inode) > attr->ia_size)
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
else
- status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
+ status = ocfs2_extend_file(inode, bh, attr->ia_size);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -1243,6 +1274,31 @@ static int ocfs2_allocate_unwritten_extents(struct inode *inode,
{
int ret;
u32 cpos, phys_cpos, clusters, alloc_size;
+ u64 end = start + len;
+ struct buffer_head *di_bh = NULL;
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno, &di_bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Nothing to do if the requested reservation range
+ * fits within the inode.
+ */
+ if (ocfs2_size_fits_inline_data(di_bh, end))
+ goto out;
+
+ ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
/*
* We consider both start and len to be inclusive.
@@ -1288,6 +1344,8 @@ next:
ret = 0;
out:
+
+ brelse(di_bh);
return ret;
}
@@ -1469,6 +1527,14 @@ static int ocfs2_remove_inode_range(struct inode *inode,
if (byte_len == 0)
return 0;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
+ byte_start + byte_len, 1);
+ if (ret)
+ mlog_errno(ret);
+ return ret;
+ }
+
trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
if (trunc_len >= trunc_start)
@@ -1713,15 +1779,13 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
int appending,
int *direct_io)
{
- int ret = 0, meta_level = appending;
+ int ret = 0, meta_level = 0;
struct inode *inode = dentry->d_inode;
- u32 clusters;
- loff_t newsize, saved_pos;
+ loff_t saved_pos, end;
/*
- * We sample i_size under a read level meta lock to see if our write
- * is extending the file, if it is we back off and get a write level
- * meta lock.
+ * We start with a read level meta lock and only jump to an ex
+ * if we need to make modifications here.
*/
for(;;) {
ret = ocfs2_meta_lock(inode, NULL, meta_level);
@@ -1763,87 +1827,47 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
saved_pos = *ppos;
}
- if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
- loff_t end = saved_pos + count;
+ end = saved_pos + count;
- /*
- * Skip the O_DIRECT checks if we don't need
- * them.
- */
- if (!direct_io || !(*direct_io))
- break;
-
- /*
- * Allowing concurrent direct writes means
- * i_size changes wouldn't be synchronized, so
- * one node could wind up truncating another
- * nodes writes.
- */
- if (end > i_size_read(inode)) {
- *direct_io = 0;
- break;
- }
-
- /*
- * We don't fill holes during direct io, so
- * check for them here. If any are found, the
- * caller will have to retake some cluster
- * locks and initiate the io as buffered.
- */
- ret = ocfs2_check_range_for_holes(inode, saved_pos,
- count);
- if (ret == 1) {
- *direct_io = 0;
- ret = 0;
- } else if (ret < 0)
- mlog_errno(ret);
+ /*
+ * Skip the O_DIRECT checks if we don't need
+ * them.
+ */
+ if (!direct_io || !(*direct_io))
break;
- }
/*
- * The rest of this loop is concerned with legacy file
- * systems which don't support sparse files.
+ * There's no sane way to do direct writes to an inode
+ * with inline data.
*/
-
- newsize = count + saved_pos;
-
- mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
- (long long) saved_pos, (long long) newsize,
- (long long) i_size_read(inode));
-
- /* No need for a higher level metadata lock if we're
- * never going past i_size. */
- if (newsize <= i_size_read(inode))
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ *direct_io = 0;
break;
-
- if (meta_level == 0) {
- ocfs2_meta_unlock(inode, meta_level);
- meta_level = 1;
- continue;
}
- spin_lock(&OCFS2_I(inode)->ip_lock);
- clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
- OCFS2_I(inode)->ip_clusters;
- spin_unlock(&OCFS2_I(inode)->ip_lock);
-
- mlog(0, "Writing at EOF, may need more allocation: "
- "i_size = %lld, newsize = %lld, need %u clusters\n",
- (long long) i_size_read(inode), (long long) newsize,
- clusters);
-
- /* We only want to continue the rest of this loop if
- * our extend will actually require more
- * allocation. */
- if (!clusters)
+ /*
+ * Allowing concurrent direct writes means
+ * i_size changes wouldn't be synchronized, so
+ * one node could wind up truncating another
+ * nodes writes.
+ */
+ if (end > i_size_read(inode)) {
+ *direct_io = 0;
break;
-
- ret = ocfs2_extend_file(inode, NULL, newsize, count);
- if (ret < 0) {
- if (ret != -ENOSPC)
- mlog_errno(ret);
- goto out_unlock;
}
+
+ /*
+ * We don't fill holes during direct io, so
+ * check for them here. If any are found, the
+ * caller will have to retake some cluster
+ * locks and initiate the io as buffered.
+ */
+ ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
+ if (ret == 1) {
+ *direct_io = 0;
+ ret = 0;
+ } else if (ret < 0)
+ mlog_errno(ret);
break;
}
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 36fe27f268e..066f14add3a 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -47,6 +47,8 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret);
+int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
+ u64 zero_to);
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
u32 clusters_to_add, u32 extents_to_split,
struct ocfs2_alloc_context **data_ac,
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index c53a6763bbb..1d5e0cb0fda 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -241,6 +241,7 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+ OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
inode->i_version = 1;
inode->i_generation = le32_to_cpu(fe->i_generation);
@@ -513,6 +514,10 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ /*
+ * This check will also skip truncate of inodes with inline
+ * data and fast symlinks.
+ */
if (fe->i_clusters) {
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
@@ -1220,6 +1225,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
ocfs2_get_inode_flags(OCFS2_I(inode));
fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
+ fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
spin_unlock(&OCFS2_I(inode)->ip_lock);
fe->i_size = cpu_to_le64(i_size_read(inode));
@@ -1257,6 +1263,7 @@ void ocfs2_refresh_inode(struct inode *inode,
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+ OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
inode->i_nlink = le16_to_cpu(fe->i_links_count);
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index a41d0817121..70e881c5553 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -51,6 +51,7 @@ struct ocfs2_inode_info
u32 ip_flags; /* see below */
u32 ip_attr; /* inode attributes */
+ u16 ip_dyn_features;
/* protected by recovery_lock. */
struct inode *ip_next_orphan;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index dbfb20bb27e..f9d01e25298 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -35,13 +35,13 @@
#include "ocfs2.h"
#include "alloc.h"
+#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
-#include "namei.h"
#include "slot_map.h"
#include "super.h"
#include "vote.h"
@@ -1213,17 +1213,49 @@ bail:
return status;
}
+struct ocfs2_orphan_filldir_priv {
+ struct inode *head;
+ struct ocfs2_super *osb;
+};
+
+static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
+ loff_t pos, u64 ino, unsigned type)
+{
+ struct ocfs2_orphan_filldir_priv *p = priv;
+ struct inode *iter;
+
+ if (name_len == 1 && !strncmp(".", name, 1))
+ return 0;
+ if (name_len == 2 && !strncmp("..", name, 2))
+ return 0;
+
+ /* Skip bad inodes so that recovery can continue */
+ iter = ocfs2_iget(p->osb, ino,
+ OCFS2_FI_FLAG_ORPHAN_RECOVERY);
+ if (IS_ERR(iter))
+ return 0;
+
+ mlog(0, "queue orphan %llu\n",
+ (unsigned long long)OCFS2_I(iter)->ip_blkno);
+ /* No locking is required for the next_orphan queue as there
+ * is only ever a single process doing orphan recovery. */
+ OCFS2_I(iter)->ip_next_orphan = p->head;
+ p->head = iter;
+
+ return 0;
+}
+
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
int slot,
struct inode **head)
{
int status;
struct inode *orphan_dir_inode = NULL;
- struct inode *iter;
- unsigned long offset, blk, local;
- struct buffer_head *bh = NULL;
- struct ocfs2_dir_entry *de;
- struct super_block *sb = osb->sb;
+ struct ocfs2_orphan_filldir_priv priv;
+ loff_t pos = 0;
+
+ priv.osb = osb;
+ priv.head = *head;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
@@ -1241,77 +1273,15 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
goto out;
}
- offset = 0;
- iter = NULL;
- while(offset < i_size_read(orphan_dir_inode)) {
- blk = offset >> sb->s_blocksize_bits;
-
- bh = ocfs2_bread(orphan_dir_inode, blk, &status, 0);
- if (!bh)
- status = -EINVAL;
- if (status < 0) {
- if (bh)
- brelse(bh);
- mlog_errno(status);
- goto out_unlock;
- }
-
- local = 0;
- while(offset < i_size_read(orphan_dir_inode)
- && local < sb->s_blocksize) {
- de = (struct ocfs2_dir_entry *) (bh->b_data + local);
-
- if (!ocfs2_check_dir_entry(orphan_dir_inode,
- de, bh, local)) {
- status = -EINVAL;
- mlog_errno(status);
- brelse(bh);
- goto out_unlock;
- }
-
- local += le16_to_cpu(de->rec_len);
- offset += le16_to_cpu(de->rec_len);
-
- /* I guess we silently fail on no inode? */
- if (!le64_to_cpu(de->inode))
- continue;
- if (de->file_type > OCFS2_FT_MAX) {
- mlog(ML_ERROR,
- "block %llu contains invalid de: "
- "inode = %llu, rec_len = %u, "
- "name_len = %u, file_type = %u, "
- "name='%.*s'\n",
- (unsigned long long)bh->b_blocknr,
- (unsigned long long)le64_to_cpu(de->inode),
- le16_to_cpu(de->rec_len),
- de->name_len,
- de->file_type,
- de->name_len,
- de->name);
- continue;
- }
- if (de->name_len == 1 && !strncmp(".", de->name, 1))
- continue;
- if (de->name_len == 2 && !strncmp("..", de->name, 2))
- continue;
-
- iter = ocfs2_iget(osb, le64_to_cpu(de->inode),
- OCFS2_FI_FLAG_ORPHAN_RECOVERY);
- if (IS_ERR(iter))
- continue;
-
- mlog(0, "queue orphan %llu\n",
- (unsigned long long)OCFS2_I(iter)->ip_blkno);
- /* No locking is required for the next_orphan
- * queue as there is only ever a single
- * process doing orphan recovery. */
- OCFS2_I(iter)->ip_next_orphan = *head;
- *head = iter;
- }
- brelse(bh);
+ status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
+ ocfs2_orphan_filldir);
+ if (status) {
+ mlog_errno(status);
+ goto out;
}
-out_unlock:
+ *head = priv.head;
+
ocfs2_meta_unlock(orphan_dir_inode, 0);
out:
mutex_unlock(&orphan_dir_inode->i_mutex);
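The journal.c hunk above replaces the hand-rolled directory-block walk with a filldir-style callback driven by ocfs2_dir_foreach(), so orphan scanning no longer needs to know how the directory stores its entries. Purely as an illustration of that callback pattern, here is a minimal self-contained userspace sketch; the types, names and the fixed entry table are invented for the example and are not the kernel API:

#include <stdio.h>
#include <string.h>

/* A filldir-style callback collecting "orphan" inode numbers, mirroring
 * the shape of ocfs2_orphan_filldir() in the hunk above. */
typedef int (*filldir_t)(void *priv, const char *name, int name_len,
                         unsigned long ino);

struct orphan_priv {
        unsigned long queued[16];
        int count;
};

static int orphan_filldir(void *priv, const char *name, int name_len,
                          unsigned long ino)
{
        struct orphan_priv *p = priv;

        /* "." and ".." are never orphans, skip them as the kernel code does */
        if (name_len == 1 && !strncmp(".", name, 1))
                return 0;
        if (name_len == 2 && !strncmp("..", name, 2))
                return 0;

        if (p->count < 16)
                p->queued[p->count++] = ino;
        return 0;
}

/* Stand-in for ocfs2_dir_foreach(): iterate a fixed entry table. */
static int dir_foreach(void *priv, filldir_t cb)
{
        static const struct { const char *name; unsigned long ino; } ents[] = {
                { ".", 11 }, { "..", 2 }, { "000000000000abcd", 43981 },
        };
        for (unsigned i = 0; i < sizeof(ents) / sizeof(ents[0]); i++) {
                int ret = cb(priv, ents[i].name, strlen(ents[i].name),
                             ents[i].ino);
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        struct orphan_priv p = { .count = 0 };

        dir_foreach(&p, orphan_filldir);
        for (int i = 0; i < p.count; i++)
                printf("queue orphan %lu\n", p.queued[i]);
        return 0;
}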
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index ce60aab013a..4b32e096156 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -282,6 +282,9 @@ int ocfs2_journal_dirty_data(handle_t *handle,
* prev. group desc. if we relink. */
#define OCFS2_SUBALLOC_ALLOC (3)
+#define OCFS2_INLINE_TO_EXTENTS_CREDITS (OCFS2_SUBALLOC_ALLOC \
+ + OCFS2_INODE_UPDATE_CREDITS)
+
/* dinode + group descriptor update. We don't relink on free yet. */
#define OCFS2_SUBALLOC_FREE (2)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 701e6d04ed5..729259016c1 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -64,29 +64,6 @@
#include "buffer_head_io.h"
-#define NAMEI_RA_CHUNKS 2
-#define NAMEI_RA_BLOCKS 4
-#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
-
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
- struct inode *dir,
- const char *name, int namelen,
- unsigned long offset,
- struct ocfs2_dir_entry **res_dir);
-
-static int ocfs2_delete_entry(handle_t *handle,
- struct inode *dir,
- struct ocfs2_dir_entry *de_del,
- struct buffer_head *bh);
-
-static int __ocfs2_add_entry(handle_t *handle,
- struct inode *dir,
- const char *name, int namelen,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh);
-
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct dentry *dentry, int mode,
@@ -97,13 +74,6 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode **ret_inode,
struct ocfs2_alloc_context *inode_ac);
-static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *parent,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_alloc_context *data_ac);
-
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
struct inode *inode,
@@ -123,17 +93,6 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
struct inode *inode,
const char *symname);
-static inline int ocfs2_add_entry(handle_t *handle,
- struct dentry *dentry,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh)
-{
- return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
- dentry->d_name.name, dentry->d_name.len,
- inode, blkno, parent_fe_bh, insert_bh);
-}
-
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
@@ -142,10 +101,8 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
{
int status;
u64 blkno;
- struct buffer_head *dirent_bh = NULL;
struct inode *inode = NULL;
struct dentry *ret;
- struct ocfs2_dir_entry *dirent;
struct ocfs2_inode_info *oi;
mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
@@ -167,9 +124,8 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
goto bail;
}
- status = ocfs2_find_files_on_disk(dentry->d_name.name,
- dentry->d_name.len, &blkno,
- dir, &dirent_bh, &dirent);
+ status = ocfs2_lookup_ino_from_name(dir, dentry->d_name.name,
+ dentry->d_name.len, &blkno);
if (status < 0)
goto bail_add;
@@ -224,83 +180,12 @@ bail_unlock:
ocfs2_meta_unlock(dir, 0);
bail:
- if (dirent_bh)
- brelse(dirent_bh);
mlog_exit_ptr(ret);
return ret;
}
-static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *parent,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_alloc_context *data_ac)
-{
- int status;
- struct buffer_head *new_bh = NULL;
- struct ocfs2_dir_entry *de = NULL;
-
- mlog_entry_void();
-
- status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
- data_ac, NULL, &new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- ocfs2_set_new_buffer_uptodate(inode, new_bh);
-
- status = ocfs2_journal_access(handle, inode, new_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- memset(new_bh->b_data, 0, osb->sb->s_blocksize);
-
- de = (struct ocfs2_dir_entry *) new_bh->b_data;
- de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
- de->name_len = 1;
- de->rec_len =
- cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- strcpy(de->name, ".");
- ocfs2_set_de_type(de, S_IFDIR);
- de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
- de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
- de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize -
- OCFS2_DIR_REC_LEN(1));
- de->name_len = 2;
- strcpy(de->name, "..");
- ocfs2_set_de_type(de, S_IFDIR);
-
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- i_size_write(inode, inode->i_sb->s_blocksize);
- inode->i_nlink = 2;
- inode->i_blocks = ocfs2_inode_sector_count(inode);
- status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- status = 0;
-bail:
- if (new_bh)
- brelse(new_bh);
-
- mlog_exit(status);
- return status;
-}
-
static int ocfs2_mknod(struct inode *dir,
struct dentry *dentry,
int mode,
@@ -365,9 +250,8 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
- /* are we making a directory? If so, reserve a cluster for his
- * 1st extent. */
- if (S_ISDIR(mode)) {
+ /* Reserve a cluster if creating an extent based directory. */
+ if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) {
status = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (status < 0) {
if (status != -ENOSPC)
@@ -564,10 +448,21 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
cpu_to_le32(CURRENT_TIME.tv_nsec);
fe->i_dtime = 0;
- fel = &fe->id2.i_list;
- fel->l_tree_depth = 0;
- fel->l_next_free_rec = 0;
- fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
+ /*
+ * If supported, directories start with inline data.
+ */
+ if (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) {
+ u16 feat = le16_to_cpu(fe->i_dyn_features);
+
+ fe->i_dyn_features = cpu_to_le16(feat | OCFS2_INLINE_DATA_FL);
+
+ fe->id2.i_data.id_count = cpu_to_le16(ocfs2_max_inline_data(osb->sb));
+ } else {
+ fel = &fe->id2.i_list;
+ fel->l_tree_depth = 0;
+ fel->l_next_free_rec = 0;
+ fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
+ }
status = ocfs2_journal_dirty(handle, *new_fe_bh);
if (status < 0) {
@@ -1048,11 +943,6 @@ static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
ocfs2_meta_unlock(inode2, 1);
}
-#define PARENT_INO(buffer) \
- ((struct ocfs2_dir_entry *) \
- ((char *)buffer + \
- le16_to_cpu(((struct ocfs2_dir_entry *)buffer)->rec_len)))->inode
-
static int ocfs2_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
@@ -1070,12 +960,12 @@ static int ocfs2_rename(struct inode *old_dir,
struct buffer_head *old_inode_bh = NULL;
struct buffer_head *insert_entry_bh = NULL;
struct ocfs2_super *osb = NULL;
- u64 newfe_blkno;
+ u64 newfe_blkno, old_de_ino;
handle_t *handle = NULL;
struct buffer_head *old_dir_bh = NULL;
struct buffer_head *new_dir_bh = NULL;
- struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry
- // and new_dentry
+ struct ocfs2_dir_entry *old_inode_dot_dot_de = NULL, *old_de = NULL,
+ *new_de = NULL;
struct buffer_head *new_de_bh = NULL, *old_de_bh = NULL; // bhs for above
struct buffer_head *old_inode_de_bh = NULL; // if old_dentry is a dir,
// this is the 1st dirent bh
@@ -1159,27 +1049,35 @@ static int ocfs2_rename(struct inode *old_dir,
}
if (S_ISDIR(old_inode->i_mode)) {
- status = -EIO;
- old_inode_de_bh = ocfs2_bread(old_inode, 0, &status, 0);
- if (!old_inode_de_bh)
+ u64 old_inode_parent;
+
+ status = ocfs2_find_files_on_disk("..", 2, &old_inode_parent,
+ old_inode, &old_inode_de_bh,
+ &old_inode_dot_dot_de);
+ if (status) {
+ status = -EIO;
goto bail;
+ }
- status = -EIO;
- if (le64_to_cpu(PARENT_INO(old_inode_de_bh->b_data)) !=
- OCFS2_I(old_dir)->ip_blkno)
+ if (old_inode_parent != OCFS2_I(old_dir)->ip_blkno) {
+ status = -EIO;
goto bail;
- status = -EMLINK;
- if (!new_inode && new_dir!=old_dir &&
- new_dir->i_nlink >= OCFS2_LINK_MAX)
+ }
+
+ if (!new_inode && new_dir != old_dir &&
+ new_dir->i_nlink >= OCFS2_LINK_MAX) {
+ status = -EMLINK;
goto bail;
+ }
}
- status = -ENOENT;
- old_de_bh = ocfs2_find_entry(old_dentry->d_name.name,
- old_dentry->d_name.len,
- old_dir, &old_de);
- if (!old_de_bh)
+ status = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ &old_de_ino);
+ if (status) {
+ status = -ENOENT;
goto bail;
+ }
/*
* Check for inode number is _not_ due to possible IO errors.
@@ -1187,8 +1085,10 @@ static int ocfs2_rename(struct inode *old_dir,
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
- if (le64_to_cpu(old_de->inode) != OCFS2_I(old_inode)->ip_blkno)
+ if (old_de_ino != OCFS2_I(old_inode)->ip_blkno) {
+ status = -ENOENT;
goto bail;
+ }
/* check if the target already exists (in which case we need
* to delete it) */
@@ -1321,20 +1221,13 @@ static int ocfs2_rename(struct inode *old_dir,
}
/* change the dirent to point to the correct inode */
- status = ocfs2_journal_access(handle, new_dir, new_de_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_update_entry(new_dir, handle, new_de_bh,
+ new_de, old_inode);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- new_de->inode = cpu_to_le64(OCFS2_I(old_inode)->ip_blkno);
- new_de->file_type = old_de->file_type;
new_dir->i_version++;
- status = ocfs2_journal_dirty(handle, new_de_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
if (S_ISDIR(new_inode->i_mode))
newfe->i_links_count = 0;
@@ -1370,7 +1263,21 @@ static int ocfs2_rename(struct inode *old_dir,
} else
mlog_errno(status);
- /* now that the name has been added to new_dir, remove the old name */
+ /*
+ * Now that the name has been added to new_dir, remove the old name.
+ *
+ * We don't keep any directory entry context around until now
+ * because the insert might have changed the type of directory
+ * we're dealing with.
+ */
+ old_de_bh = ocfs2_find_entry(old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ old_dir, &old_de);
+ if (!old_de_bh) {
+ status = -EIO;
+ goto bail;
+ }
+
status = ocfs2_delete_entry(handle, old_dir, old_de, old_de_bh);
if (status < 0) {
mlog_errno(status);
@@ -1383,12 +1290,8 @@ static int ocfs2_rename(struct inode *old_dir,
}
old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
if (old_inode_de_bh) {
- status = ocfs2_journal_access(handle, old_inode,
- old_inode_de_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- PARENT_INO(old_inode_de_bh->b_data) =
- cpu_to_le64(OCFS2_I(new_dir)->ip_blkno);
- status = ocfs2_journal_dirty(handle, old_inode_de_bh);
+ status = ocfs2_update_entry(old_inode, handle, old_inode_de_bh,
+ old_inode_dot_dot_de, new_dir);
old_dir->i_nlink--;
if (new_inode) {
new_inode->i_nlink--;
@@ -1767,329 +1670,6 @@ bail:
return status;
}
-int ocfs2_check_dir_entry(struct inode * dir,
- struct ocfs2_dir_entry * de,
- struct buffer_head * bh,
- unsigned long offset)
-{
- const char *error_msg = NULL;
- const int rlen = le16_to_cpu(de->rec_len);
-
- if (rlen < OCFS2_DIR_REC_LEN(1))
- error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
- error_msg = "rec_len % 4 != 0";
- else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
- error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
- error_msg = "directory entry across blocks";
-
- if (error_msg != NULL)
- mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
- "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
- offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
- de->name_len);
- return error_msg == NULL ? 1 : 0;
-}
-
-/* we don't always have a dentry for what we want to add, so people
- * like orphan dir can call this instead.
- *
- * If you pass me insert_bh, I'll skip the search of the other dir
- * blocks and put the record in there.
- */
-static int __ocfs2_add_entry(handle_t *handle,
- struct inode *dir,
- const char *name, int namelen,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh)
-{
- unsigned long offset;
- unsigned short rec_len;
- struct ocfs2_dir_entry *de, *de1;
- struct super_block *sb;
- int retval, status;
-
- mlog_entry_void();
-
- sb = dir->i_sb;
-
- if (!namelen)
- return -EINVAL;
-
- rec_len = OCFS2_DIR_REC_LEN(namelen);
- offset = 0;
- de = (struct ocfs2_dir_entry *) insert_bh->b_data;
- while (1) {
- BUG_ON((char *)de >= sb->s_blocksize + insert_bh->b_data);
- /* These checks should've already been passed by the
- * prepare function, but I guess we can leave them
- * here anyway. */
- if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
- retval = -ENOENT;
- goto bail;
- }
- if (ocfs2_match(namelen, name, de)) {
- retval = -EEXIST;
- goto bail;
- }
- if (((le64_to_cpu(de->inode) == 0) &&
- (le16_to_cpu(de->rec_len) >= rec_len)) ||
- (le16_to_cpu(de->rec_len) >=
- (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
- if (retval < 0) {
- mlog_errno(retval);
- goto bail;
- }
-
- status = ocfs2_journal_access(handle, dir, insert_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- /* By now the buffer is marked for journaling */
- offset += le16_to_cpu(de->rec_len);
- if (le64_to_cpu(de->inode)) {
- de1 = (struct ocfs2_dir_entry *)((char *) de +
- OCFS2_DIR_REC_LEN(de->name_len));
- de1->rec_len =
- cpu_to_le16(le16_to_cpu(de->rec_len) -
- OCFS2_DIR_REC_LEN(de->name_len));
- de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- de = de1;
- }
- de->file_type = OCFS2_FT_UNKNOWN;
- if (blkno) {
- de->inode = cpu_to_le64(blkno);
- ocfs2_set_de_type(de, inode->i_mode);
- } else
- de->inode = 0;
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
-
- dir->i_version++;
- status = ocfs2_journal_dirty(handle, insert_bh);
- retval = 0;
- goto bail;
- }
- offset += le16_to_cpu(de->rec_len);
- de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
- }
-
- /* when you think about it, the assert above should prevent us
- * from ever getting here. */
- retval = -ENOSPC;
-bail:
-
- mlog_exit(retval);
- return retval;
-}
-
-
-/*
- * ocfs2_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ocfs2_delete_entry(handle_t *handle,
- struct inode *dir,
- struct ocfs2_dir_entry *de_del,
- struct buffer_head *bh)
-{
- struct ocfs2_dir_entry *de, *pde;
- int i, status = -ENOENT;
-
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
-
- i = 0;
- pde = NULL;
- de = (struct ocfs2_dir_entry *) bh->b_data;
- while (i < bh->b_size) {
- if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
- status = -EIO;
- mlog_errno(status);
- goto bail;
- }
- if (de == de_del) {
- status = ocfs2_journal_access(handle, dir, bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
- status = -EIO;
- mlog_errno(status);
- goto bail;
- }
- if (pde)
- pde->rec_len =
- cpu_to_le16(le16_to_cpu(pde->rec_len) +
- le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
- dir->i_version++;
- status = ocfs2_journal_dirty(handle, bh);
- goto bail;
- }
- i += le16_to_cpu(de->rec_len);
- pde = de;
- de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
- }
-bail:
- mlog_exit(status);
- return status;
-}
-
-/*
- * Returns 0 if not found, -1 on failure, and 1 on success
- */
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
- struct inode *dir,
- const char *name, int namelen,
- unsigned long offset,
- struct ocfs2_dir_entry **res_dir)
-{
- struct ocfs2_dir_entry *de;
- char *dlimit, *de_buf;
- int de_len;
- int ret = 0;
-
- mlog_entry_void();
-
- de_buf = bh->b_data;
- dlimit = de_buf + dir->i_sb->s_blocksize;
-
- while (de_buf < dlimit) {
- /* this code is executed quadratically often */
- /* do minimal checking `by hand' */
-
- de = (struct ocfs2_dir_entry *) de_buf;
-
- if (de_buf + namelen <= dlimit &&
- ocfs2_match(namelen, name, de)) {
- /* found a match - just to be sure, do a full check */
- if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
- ret = -1;
- goto bail;
- }
- *res_dir = de;
- ret = 1;
- goto bail;
- }
-
- /* prevent looping on a bad block */
- de_len = le16_to_cpu(de->rec_len);
- if (de_len <= 0) {
- ret = -1;
- goto bail;
- }
-
- de_buf += de_len;
- offset += de_len;
- }
-
-bail:
- mlog_exit(ret);
- return ret;
-}
-
-struct buffer_head *ocfs2_find_entry(const char *name, int namelen,
- struct inode *dir,
- struct ocfs2_dir_entry **res_dir)
-{
- struct super_block *sb;
- struct buffer_head *bh_use[NAMEI_RA_SIZE];
- struct buffer_head *bh, *ret = NULL;
- unsigned long start, block, b;
- int ra_max = 0; /* Number of bh's in the readahead
- buffer, bh_use[] */
- int ra_ptr = 0; /* Current index into readahead
- buffer */
- int num = 0;
- int nblocks, i, err;
-
- mlog_entry_void();
-
- *res_dir = NULL;
- sb = dir->i_sb;
-
- nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
- start = OCFS2_I(dir)->ip_dir_start_lookup;
- if (start >= nblocks)
- start = 0;
- block = start;
-
-restart:
- do {
- /*
- * We deal with the read-ahead logic here.
- */
- if (ra_ptr >= ra_max) {
- /* Refill the readahead buffer */
- ra_ptr = 0;
- b = block;
- for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
- /*
- * Terminate if we reach the end of the
- * directory and must wrap, or if our
- * search has finished at this block.
- */
- if (b >= nblocks || (num && block == start)) {
- bh_use[ra_max] = NULL;
- break;
- }
- num++;
-
- bh = ocfs2_bread(dir, b++, &err, 1);
- bh_use[ra_max] = bh;
- }
- }
- if ((bh = bh_use[ra_ptr++]) == NULL)
- goto next;
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- /* read error, skip block & hope for the best */
- ocfs2_error(dir->i_sb, "reading directory %llu, "
- "offset %lu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- block);
- brelse(bh);
- goto next;
- }
- i = ocfs2_search_dirblock(bh, dir, name, namelen,
- block << sb->s_blocksize_bits,
- res_dir);
- if (i == 1) {
- OCFS2_I(dir)->ip_dir_start_lookup = block;
- ret = bh;
- goto cleanup_and_exit;
- } else {
- brelse(bh);
- if (i < 0)
- goto cleanup_and_exit;
- }
- next:
- if (++block >= nblocks)
- block = 0;
- } while (block != start);
-
- /*
- * If the directory has grown while we were searching, then
- * search the last part of the directory before giving up.
- */
- block = nblocks;
- nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
- if (block < nblocks) {
- start = 0;
- goto restart;
- }
-
-cleanup_and_exit:
- /* Clean up the read-ahead blocks */
- for (; ra_ptr < ra_max; ra_ptr++)
- brelse(bh_use[ra_ptr]);
-
- mlog_exit_ptr(ret);
- return ret;
-}
-
static int ocfs2_blkno_stringify(u64 blkno, char *name)
{
int status, namelen;
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
index 0975c7b7212..688aef64c87 100644
--- a/fs/ocfs2/namei.h
+++ b/fs/ocfs2/namei.h
@@ -30,29 +30,10 @@ extern const struct inode_operations ocfs2_dir_iops;
struct dentry *ocfs2_get_parent(struct dentry *child);
-int ocfs2_check_dir_entry (struct inode *dir,
- struct ocfs2_dir_entry *de,
- struct buffer_head *bh,
- unsigned long offset);
-struct buffer_head *ocfs2_find_entry(const char *name,
- int namelen,
- struct inode *dir,
- struct ocfs2_dir_entry **res_dir);
int ocfs2_orphan_del(struct ocfs2_super *osb,
handle_t *handle,
struct inode *orphan_dir_inode,
struct inode *inode,
struct buffer_head *orphan_dir_bh);
-static inline int ocfs2_match(int len,
- const char * const name,
- struct ocfs2_dir_entry *de)
-{
- if (len != de->name_len)
- return 0;
- if (!de->inode)
- return 0;
- return !memcmp(name, de->name, len);
-}
-
#endif /* OCFS2_NAMEI_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 58307853fb4..60a23e1906b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -319,6 +319,13 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
return 0;
}
+static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb)
+{
+ if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
+ return 1;
+ return 0;
+}
+
/* set / clear functions because cluster events can make these happen
* in parallel so we want the transitions to be atomic. this also
* means that any future flags osb_flags must be protected by spinlock
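ocfs2_supports_inline_data() added above is a plain incompat-feature bit test. A compact sketch of the same check, assuming the 0x0040 flag value shown later in this patch; the struct name here is a stand-in, not the real ocfs2_super layout:

#include <stdio.h>

#define FEATURE_INCOMPAT_INLINE_DATA 0x0040u  /* matches the on-disk flag below */

struct fake_super { unsigned int s_feature_incompat; };

static int supports_inline_data(const struct fake_super *sb)
{
        return (sb->s_feature_incompat & FEATURE_INCOMPAT_INLINE_DATA) != 0;
}

int main(void)
{
        struct fake_super sb = { .s_feature_incompat = 0x0040 };
        printf("inline data: %s\n", supports_inline_data(&sb) ? "yes" : "no");
        return 0;
}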
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 82f8a75b207..6ef876759a7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -87,7 +87,8 @@
#define OCFS2_FEATURE_COMPAT_SUPP OCFS2_FEATURE_COMPAT_BACKUP_SB
#define OCFS2_FEATURE_INCOMPAT_SUPP (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
- | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
+ | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
+ | OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
#define OCFS2_FEATURE_RO_COMPAT_SUPP OCFS2_FEATURE_RO_COMPAT_UNWRITTEN
/*
@@ -111,6 +112,20 @@
#define OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC 0x0010
/*
+ * Tunefs sets this incompat flag before starting an operation which
+ * would require cleanup on abort. This is done to protect users from
+ * inadvertently mounting the fs after an aborted run without
+ * fsck-ing.
+ *
+ * s_tunefs_flags on the super block describes precisely which
+ * operations were in progress.
+ */
+#define OCFS2_FEATURE_INCOMPAT_TUNEFS_INPROG 0x0020
+
+/* Support for data packed into inode blocks */
+#define OCFS2_FEATURE_INCOMPAT_INLINE_DATA 0x0040
+
+/*
* backup superblock flag is used to indicate that this volume
* has backup superblocks.
*/
@@ -130,6 +145,11 @@
#define OCFS2_MAX_BACKUP_SUPERBLOCKS 6
/*
+ * Flags on ocfs2_super_block.s_tunefs_flags
+ */
+#define OCFS2_TUNEFS_INPROG_REMOVE_SLOT 0x0001 /* Removing slots */
+
+/*
* Flags on ocfs2_dinode.i_flags
*/
#define OCFS2_VALID_FL (0x00000001) /* Inode is valid */
@@ -146,6 +166,17 @@
#define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */
#define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */
+/*
+ * Flags on ocfs2_dinode.i_dyn_features
+ *
+ * These can change much more often than i_flags. When adding flags,
+ * keep in mind that i_dyn_features is only 16 bits wide.
+ */
+#define OCFS2_INLINE_DATA_FL (0x0001) /* Data stored in inode block */
+#define OCFS2_HAS_XATTR_FL (0x0002)
+#define OCFS2_INLINE_XATTR_FL (0x0004)
+#define OCFS2_INDEXED_DIR_FL (0x0008)
+
/* Inode attributes, keep in sync with EXT2 */
#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
#define OCFS2_UNRM_FL (0x00000002) /* Undelete */
@@ -447,8 +478,8 @@ struct ocfs2_super_block {
__le32 s_clustersize_bits; /* Clustersize for this fs */
/*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts
before tunefs required */
- __le16 s_reserved1;
- __le32 s_reserved2;
+ __le16 s_tunefs_flag;
+ __le32 s_reserved1;
__le64 s_first_cluster_group; /* Block offset of 1st cluster
* group header */
/*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */
@@ -471,6 +502,19 @@ struct ocfs2_local_alloc
};
/*
+ * Data-in-inode header. This is only used if i_dyn_features has
+ * OCFS2_INLINE_DATA_FL set.
+ */
+struct ocfs2_inline_data
+{
+/*00*/ __le16 id_count; /* Number of bytes that can be used
+ * for data, starting at id_data */
+ __le16 id_reserved0;
+ __le32 id_reserved1;
+ __u8 id_data[0]; /* Start of user data */
+};
+
+/*
* On disk inode for OCFS2
*/
struct ocfs2_dinode {
@@ -502,7 +546,7 @@ struct ocfs2_dinode {
__le32 i_attr;
__le16 i_orphaned_slot; /* Only valid when OCFS2_ORPHANED_FL
was set in i_flags */
- __le16 i_reserved1;
+ __le16 i_dyn_features;
/*70*/ __le64 i_reserved2[8];
/*B8*/ union {
__le64 i_pad1; /* Generic way to refer to this
@@ -528,6 +572,7 @@ struct ocfs2_dinode {
struct ocfs2_chain_list i_chain;
struct ocfs2_extent_list i_list;
struct ocfs2_truncate_log i_dealloc;
+ struct ocfs2_inline_data i_data;
__u8 i_symlink[0];
} id2;
/* Actual on-disk size is one block */
@@ -577,6 +622,12 @@ static inline int ocfs2_fast_symlink_chars(struct super_block *sb)
offsetof(struct ocfs2_dinode, id2.i_symlink);
}
+static inline int ocfs2_max_inline_data(struct super_block *sb)
+{
+ return sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+}
+
static inline int ocfs2_extent_recs_per_inode(struct super_block *sb)
{
int size;
@@ -656,6 +707,11 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
}
+static inline int ocfs2_max_inline_data(int blocksize)
+{
+ return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+}
+
static inline int ocfs2_extent_recs_per_inode(int blocksize)
{
int size;
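The ocfs2_max_inline_data() helpers above derive the inline-data capacity from the block size minus the offset of id_data[] inside the dinode. A self-contained sketch of that calculation with simplified stand-in structures; the field sizes and the 192-byte header are illustrative, not the real on-disk offsets:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the on-disk structures in the hunk above. */
struct inline_data {
        uint16_t id_count;      /* usable bytes starting at id_data */
        uint16_t id_reserved0;
        uint32_t id_reserved1;
        uint8_t  id_data[1];    /* stands in for the kernel's id_data[0] */
};

struct dinode {
        uint8_t  header[192];   /* pretend fixed part of the inode */
        union {
                struct inline_data i_data;
        } id2;
};

static int max_inline_data(int blocksize)
{
        return blocksize - offsetof(struct dinode, id2.i_data.id_data);
}

int main(void)
{
        for (int bs = 512; bs <= 4096; bs *= 2)
                printf("blocksize %4d -> %4d inline bytes\n",
                       bs, max_inline_data(bs));
        return 0;
}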
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c034b5129c1..0e2a1b45bf9 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -39,6 +39,7 @@
#include <linux/parser.h>
#include <linux/crc32.h>
#include <linux/debugfs.h>
+#include <linux/mount.h>
#include <cluster/nodemanager.h>
@@ -91,6 +92,7 @@ struct mount_options
static int ocfs2_parse_options(struct super_block *sb, char *options,
struct mount_options *mopt,
int is_remount);
+static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
@@ -105,7 +107,7 @@ static int ocfs2_sync_fs(struct super_block *sb, int wait);
static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb);
static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb);
-static int ocfs2_release_system_inodes(struct ocfs2_super *osb);
+static void ocfs2_release_system_inodes(struct ocfs2_super *osb);
static int ocfs2_fill_local_node_info(struct ocfs2_super *osb);
static int ocfs2_check_volume(struct ocfs2_super *osb);
static int ocfs2_verify_volume(struct ocfs2_dinode *di,
@@ -133,6 +135,7 @@ static const struct super_operations ocfs2_sops = {
.write_super = ocfs2_write_super,
.put_super = ocfs2_put_super,
.remount_fs = ocfs2_remount,
+ .show_options = ocfs2_show_options,
};
enum {
@@ -177,7 +180,7 @@ static void ocfs2_write_super(struct super_block *sb)
static int ocfs2_sync_fs(struct super_block *sb, int wait)
{
- int status = 0;
+ int status;
tid_t target;
struct ocfs2_super *osb = OCFS2_SB(sb);
@@ -275,9 +278,9 @@ bail:
return status;
}
-static int ocfs2_release_system_inodes(struct ocfs2_super *osb)
+static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
{
- int status = 0, i;
+ int i;
struct inode *inode;
mlog_entry_void();
@@ -302,8 +305,7 @@ static int ocfs2_release_system_inodes(struct ocfs2_super *osb)
osb->root_inode = NULL;
}
- mlog_exit(status);
- return status;
+ mlog_exit(0);
}
/* We're allocating fs objects, use GFP_NOFS */
@@ -453,7 +455,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
struct buffer_head **bh,
int *sector_size)
{
- int status = 0, tmpstat;
+ int status, tmpstat;
struct ocfs1_vol_disk_hdr *hdr;
struct ocfs2_dinode *di;
int blksize;
@@ -830,6 +832,41 @@ bail:
return status;
}
+static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+{
+ struct ocfs2_super *osb = OCFS2_SB(mnt->mnt_sb);
+ unsigned long opts = osb->s_mount_opt;
+
+ if (opts & OCFS2_MOUNT_HB_LOCAL)
+ seq_printf(s, ",_netdev,heartbeat=local");
+ else
+ seq_printf(s, ",heartbeat=none");
+
+ if (opts & OCFS2_MOUNT_NOINTR)
+ seq_printf(s, ",nointr");
+
+ if (opts & OCFS2_MOUNT_DATA_WRITEBACK)
+ seq_printf(s, ",data=writeback");
+ else
+ seq_printf(s, ",data=ordered");
+
+ if (opts & OCFS2_MOUNT_BARRIER)
+ seq_printf(s, ",barrier=1");
+
+ if (opts & OCFS2_MOUNT_ERRORS_PANIC)
+ seq_printf(s, ",errors=panic");
+ else
+ seq_printf(s, ",errors=remount-ro");
+
+ if (osb->preferred_slot != OCFS2_INVALID_SLOT)
+ seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
+
+ if (osb->s_atime_quantum != OCFS2_DEFAULT_ATIME_QUANTUM)
+ seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
+
+ return 0;
+}
+
static int __init ocfs2_init(void)
{
int status;
@@ -1209,12 +1246,13 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
tmp = ocfs2_request_umount_vote(osb);
if (tmp < 0)
mlog_errno(tmp);
+ }
- if (osb->slot_num != OCFS2_INVALID_SLOT)
- ocfs2_put_slot(osb);
+ if (osb->slot_num != OCFS2_INVALID_SLOT)
+ ocfs2_put_slot(osb);
+ if (osb->dlm)
ocfs2_super_unlock(osb, 1);
- }
ocfs2_release_system_inodes(osb);
@@ -1275,7 +1313,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
struct buffer_head *bh,
int sector_size)
{
- int status = 0;
+ int status;
int i, cbits, bbits;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
struct inode *inode = NULL;
@@ -1596,7 +1634,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
static int ocfs2_check_volume(struct ocfs2_super *osb)
{
- int status = 0;
+ int status;
int dirty;
int local;
struct ocfs2_dinode *local_alloc = NULL; /* only used if we
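The new ocfs2_show_options() callback above simply renders the mount-option bits back into the comma-separated form seen in /proc/mounts. A minimal userspace model of that rendering, with the option strings copied from the hunk; the bit values are arbitrary stand-ins and the seq_file plumbing is replaced by printf:

#include <stdio.h>

#define MOUNT_HB_LOCAL          0x01
#define MOUNT_NOINTR            0x02
#define MOUNT_DATA_WRITEBACK    0x04
#define MOUNT_BARRIER           0x08
#define MOUNT_ERRORS_PANIC      0x10

static void show_options(unsigned long opts)
{
        if (opts & MOUNT_HB_LOCAL)
                printf(",_netdev,heartbeat=local");
        else
                printf(",heartbeat=none");

        if (opts & MOUNT_NOINTR)
                printf(",nointr");

        printf(opts & MOUNT_DATA_WRITEBACK ? ",data=writeback" : ",data=ordered");

        if (opts & MOUNT_BARRIER)
                printf(",barrier=1");

        printf(opts & MOUNT_ERRORS_PANIC ? ",errors=panic" : ",errors=remount-ro");
        printf("\n");
}

int main(void)
{
        show_options(MOUNT_HB_LOCAL | MOUNT_BARRIER);
        return 0;
}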
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 5df6e35d09b..fd2e846e3e6 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -100,17 +100,14 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
char namebuf[40];
struct inode *inode = NULL;
u64 blkno;
- struct buffer_head *dirent_bh = NULL;
- struct ocfs2_dir_entry *de = NULL;
int status = 0;
ocfs2_sprintf_system_inode_name(namebuf,
sizeof(namebuf),
type, slot);
- status = ocfs2_find_files_on_disk(namebuf, strlen(namebuf),
- &blkno, osb->sys_root_inode,
- &dirent_bh, &de);
+ status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
+ strlen(namebuf), &blkno);
if (status < 0) {
goto bail;
}
@@ -122,8 +119,7 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
goto bail;
}
bail:
- if (dirent_bh)
- brelse(dirent_bh);
+
return inode;
}
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 783c57ec07d..722e12e5acc 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -381,10 +381,12 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
p->partno = part;
p->policy = disk->policy;
- if (isdigit(disk->kobj.name[strlen(disk->kobj.name)-1]))
- snprintf(p->kobj.name,KOBJ_NAME_LEN,"%sp%d",disk->kobj.name,part);
+ if (isdigit(disk->kobj.k_name[strlen(disk->kobj.k_name)-1]))
+ kobject_set_name(&p->kobj, "%sp%d",
+ kobject_name(&disk->kobj), part);
else
- snprintf(p->kobj.name,KOBJ_NAME_LEN,"%s%d",disk->kobj.name,part);
+ kobject_set_name(&p->kobj, "%s%d",
+ kobject_name(&disk->kobj),part);
p->kobj.parent = &disk->kobj;
p->kobj.ktype = &ktype_part;
kobject_init(&p->kobj);
@@ -477,9 +479,9 @@ void register_disk(struct gendisk *disk)
struct hd_struct *p;
int err;
- strlcpy(disk->kobj.name,disk->disk_name,KOBJ_NAME_LEN);
+ kobject_set_name(&disk->kobj, "%s", disk->disk_name);
/* ewww... some of these buggers have / in name... */
- s = strchr(disk->kobj.name, '/');
+ s = strchr(disk->kobj.k_name, '/');
if (s)
*s = '!';
if ((err = kobject_add(&disk->kobj)))
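The partitions/check.c hunk keeps the existing naming rule while switching from fixed-size name buffers to kobject_set_name(): append "p<N>" when the parent disk name already ends in a digit, otherwise just append the number. A small standalone sketch of that rule (snprintf stands in for the kobject helpers):

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* e.g. mmcblk0 -> mmcblk0p1, but sda -> sda1 */
static void part_name(char *buf, size_t len, const char *disk, int part)
{
        if (isdigit((unsigned char)disk[strlen(disk) - 1]))
                snprintf(buf, len, "%sp%d", disk, part);
        else
                snprintf(buf, len, "%s%d", disk, part);
}

int main(void)
{
        char name[32];

        part_name(name, sizeof(name), "sda", 1);
        printf("%s\n", name);          /* sda1 */
        part_name(name, sizeof(name), "mmcblk0", 1);
        printf("%s\n", name);          /* mmcblk0p1 */
        return 0;
}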
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 5afe2a26f5d..006fc64227d 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -1,9 +1,15 @@
/*
- * bin.c - binary file operations for sysfs.
+ * fs/sysfs/bin.c - sysfs binary file implementation
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Matthew Wilcox
* Copyright (c) 2004 Silicon Graphics, Inc.
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#undef DEBUG
@@ -14,9 +20,9 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
@@ -30,8 +36,8 @@ static int
fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
/* need attr_sd for attr, its parent for kobj */
@@ -87,8 +93,8 @@ static int
flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
/* need attr_sd for attr, its parent for kobj */
@@ -140,8 +146,8 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
{
struct bin_buffer *bb = file->private_data;
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
mutex_lock(&bb->mutex);
@@ -167,12 +173,12 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
static int open(struct inode * inode, struct file * file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
struct bin_buffer *bb = NULL;
int error;
- /* need attr_sd for attr */
- if (!sysfs_get_active(attr_sd))
+ /* binary file operations require both @sd and its parent */

+ if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
error = -EACCES;
@@ -193,13 +199,12 @@ static int open(struct inode * inode, struct file * file)
mutex_init(&bb->mutex);
file->private_data = bb;
- /* open succeeded, put active reference and pin attr_sd */
- sysfs_put_active(attr_sd);
- sysfs_get(attr_sd);
+ /* open succeeded, put active references */
+ sysfs_put_active_two(attr_sd);
return 0;
err_out:
- sysfs_put_active(attr_sd);
+ sysfs_put_active_two(attr_sd);
kfree(bb);
return error;
}
@@ -211,7 +216,6 @@ static int release(struct inode * inode, struct file * file)
if (bb->mmapped)
sysfs_put_active_two(attr_sd);
- sysfs_put(attr_sd);
kfree(bb->buffer);
kfree(bb);
return 0;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 83e76b3813c..9161db4d6b5 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1,5 +1,13 @@
/*
- * dir.c - Operations for sysfs directories.
+ * fs/sysfs/dir.c - sysfs core and dir operation implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#undef DEBUG
@@ -11,10 +19,11 @@
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/completion.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex);
+DEFINE_MUTEX(sysfs_rename_mutex);
spinlock_t sysfs_assoc_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t sysfs_ino_lock = SPIN_LOCK_UNLOCKED;
@@ -25,18 +34,28 @@ static DEFINE_IDA(sysfs_ino_ida);
* @sd: sysfs_dirent of interest
*
* Link @sd into its sibling list which starts from
- * sd->s_parent->s_children.
+ * sd->s_parent->s_dir.children.
*
* Locking:
* mutex_lock(sysfs_mutex)
*/
-void sysfs_link_sibling(struct sysfs_dirent *sd)
+static void sysfs_link_sibling(struct sysfs_dirent *sd)
{
struct sysfs_dirent *parent_sd = sd->s_parent;
+ struct sysfs_dirent **pos;
BUG_ON(sd->s_sibling);
- sd->s_sibling = parent_sd->s_children;
- parent_sd->s_children = sd;
+
+ /* Store directory entries in order by ino. This allows
+ * readdir to properly restart without having to add a
+ * cursor into the s_dir.children list.
+ */
+ for (pos = &parent_sd->s_dir.children; *pos; pos = &(*pos)->s_sibling) {
+ if (sd->s_ino < (*pos)->s_ino)
+ break;
+ }
+ sd->s_sibling = *pos;
+ *pos = sd;
}
/**
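sysfs_link_sibling() now inserts into the singly linked sibling list in ascending inode order, which is what lets the rewritten readdir later in this file resume from f_pos without keeping a cursor node in the list. A standalone sketch of the same ordered insert; the node type is illustrative, not the kernel's sysfs_dirent:

#include <stdio.h>

struct node {
        unsigned long ino;
        struct node *sibling;
};

/* Insert @nd into the list at *head, keeping ascending ino order. */
static void link_sibling(struct node **head, struct node *nd)
{
        struct node **pos;

        for (pos = head; *pos; pos = &(*pos)->sibling)
                if (nd->ino < (*pos)->ino)
                        break;
        nd->sibling = *pos;
        *pos = nd;
}

int main(void)
{
        struct node a = { 7, NULL }, b = { 3, NULL }, c = { 11, NULL };
        struct node *head = NULL, *it;

        link_sibling(&head, &a);
        link_sibling(&head, &b);
        link_sibling(&head, &c);
        for (it = head; it; it = it->sibling)
                printf("%lu\n", it->ino);       /* prints 3, 7, 11 */
        return 0;
}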
@@ -44,16 +63,17 @@ void sysfs_link_sibling(struct sysfs_dirent *sd)
* @sd: sysfs_dirent of interest
*
* Unlink @sd from its sibling list which starts from
- * sd->s_parent->s_children.
+ * sd->s_parent->s_dir.children.
*
* Locking:
* mutex_lock(sysfs_mutex)
*/
-void sysfs_unlink_sibling(struct sysfs_dirent *sd)
+static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
{
struct sysfs_dirent **pos;
- for (pos = &sd->s_parent->s_children; *pos; pos = &(*pos)->s_sibling) {
+ for (pos = &sd->s_parent->s_dir.children; *pos;
+ pos = &(*pos)->s_sibling) {
if (*pos == sd) {
*pos = sd->s_sibling;
sd->s_sibling = NULL;
@@ -67,96 +87,39 @@ void sysfs_unlink_sibling(struct sysfs_dirent *sd)
* @sd: sysfs_dirent of interest
*
* Get dentry for @sd. Dentry is looked up if currently not
- * present. This function climbs sysfs_dirent tree till it
- * reaches a sysfs_dirent with valid dentry attached and descends
- * down from there looking up dentry for each step.
+ * present. This function descends from the root looking up
+ * dentry for each step.
*
* LOCKING:
- * Kernel thread context (may sleep)
+ * mutex_lock(sysfs_rename_mutex)
*
* RETURNS:
* Pointer to found dentry on success, ERR_PTR() value on error.
*/
struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd)
{
- struct sysfs_dirent *cur;
- struct dentry *parent_dentry, *dentry;
- int i, depth;
-
- /* Find the first parent which has valid s_dentry and get the
- * dentry.
- */
- mutex_lock(&sysfs_mutex);
- restart0:
- spin_lock(&sysfs_assoc_lock);
- restart1:
- spin_lock(&dcache_lock);
+ struct dentry *dentry = dget(sysfs_sb->s_root);
- dentry = NULL;
- depth = 0;
- cur = sd;
- while (!cur->s_dentry || !cur->s_dentry->d_inode) {
- if (cur->s_flags & SYSFS_FLAG_REMOVED) {
- dentry = ERR_PTR(-ENOENT);
- depth = 0;
- break;
- }
- cur = cur->s_parent;
- depth++;
- }
- if (!IS_ERR(dentry))
- dentry = dget_locked(cur->s_dentry);
+ while (dentry->d_fsdata != sd) {
+ struct sysfs_dirent *cur;
+ struct dentry *parent;
- spin_unlock(&dcache_lock);
- spin_unlock(&sysfs_assoc_lock);
-
- /* from the found dentry, look up depth times */
- while (depth--) {
- /* find and get depth'th ancestor */
- for (cur = sd, i = 0; cur && i < depth; i++)
+ /* find the first ancestor which hasn't been looked up */
+ cur = sd;
+ while (cur->s_parent != dentry->d_fsdata)
cur = cur->s_parent;
- /* This can happen if tree structure was modified due
- * to move/rename. Restart.
- */
- if (i != depth) {
- dput(dentry);
- goto restart0;
- }
-
- sysfs_get(cur);
-
- mutex_unlock(&sysfs_mutex);
-
/* look it up */
- parent_dentry = dentry;
- dentry = lookup_one_len_kern(cur->s_name, parent_dentry,
+ parent = dentry;
+ mutex_lock(&parent->d_inode->i_mutex);
+ dentry = lookup_one_len_kern(cur->s_name, parent,
strlen(cur->s_name));
- dput(parent_dentry);
-
- if (IS_ERR(dentry)) {
- sysfs_put(cur);
- return dentry;
- }
+ mutex_unlock(&parent->d_inode->i_mutex);
+ dput(parent);
- mutex_lock(&sysfs_mutex);
- spin_lock(&sysfs_assoc_lock);
-
- /* This, again, can happen if tree structure has
- * changed and we looked up the wrong thing. Restart.
- */
- if (cur->s_dentry != dentry) {
- dput(dentry);
- sysfs_put(cur);
- goto restart1;
- }
-
- spin_unlock(&sysfs_assoc_lock);
-
- sysfs_put(cur);
+ if (IS_ERR(dentry))
+ break;
}
-
- mutex_unlock(&sysfs_mutex);
return dentry;
}
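The rewritten sysfs_get_dentry() above no longer climbs up looking for a cached dentry; it walks down from the sysfs root, and on each pass picks the ancestor of the target that is a direct child of the node already reached, then looks that one component up. A small userspace model of that ancestor-selection loop; the structures and the printed "lookup" step are illustrative stand-ins for the real dentry lookup:

#include <stdio.h>

struct sd {
        const char *name;
        struct sd *parent;
};

static void resolve(struct sd *root, struct sd *target)
{
        struct sd *reached = root;

        while (reached != target) {
                struct sd *cur = target;

                /* first ancestor which hasn't been "looked up" yet */
                while (cur->parent != reached)
                        cur = cur->parent;

                printf("lookup '%s' under '%s'\n", cur->name, reached->name);
                reached = cur;  /* in the kernel this is lookup_one_len_kern() */
        }
}

int main(void)
{
        struct sd root = { "/", NULL };
        struct sd devices = { "devices", &root };
        struct sd pci = { "pci0000:00", &devices };

        resolve(&root, &pci);
        return 0;
}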
@@ -319,7 +282,7 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
parent_sd = sd->s_parent;
if (sysfs_type(sd) == SYSFS_KOBJ_LINK)
- sysfs_put(sd->s_elem.symlink.target_sd);
+ sysfs_put(sd->s_symlink.target_sd);
if (sysfs_type(sd) & SYSFS_COPY_NAME)
kfree(sd->s_name);
kfree(sd->s_iattr);
@@ -335,22 +298,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
{
struct sysfs_dirent * sd = dentry->d_fsdata;
- if (sd) {
- /* sd->s_dentry is protected with sysfs_assoc_lock.
- * This allows sysfs_drop_dentry() to dereference it.
- */
- spin_lock(&sysfs_assoc_lock);
-
- /* The dentry might have been deleted or another
- * lookup could have happened updating sd->s_dentry to
- * point the new dentry. Ignore if it isn't pointing
- * to this dentry.
- */
- if (sd->s_dentry == dentry)
- sd->s_dentry = NULL;
- spin_unlock(&sysfs_assoc_lock);
- sysfs_put(sd);
- }
+ sysfs_put(sd);
iput(inode);
}
@@ -378,7 +326,6 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
atomic_set(&sd->s_count, 1);
atomic_set(&sd->s_active, 0);
- atomic_set(&sd->s_event, 1);
sd->s_name = name;
sd->s_mode = mode;
@@ -393,30 +340,6 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
return NULL;
}
-/**
- * sysfs_attach_dentry - associate sysfs_dirent with dentry
- * @sd: target sysfs_dirent
- * @dentry: dentry to associate
- *
- * Associate @sd with @dentry. This is protected by
- * sysfs_assoc_lock to avoid race with sysfs_d_iput().
- *
- * LOCKING:
- * mutex_lock(sysfs_mutex)
- */
-static void sysfs_attach_dentry(struct sysfs_dirent *sd, struct dentry *dentry)
-{
- dentry->d_op = &sysfs_dentry_ops;
- dentry->d_fsdata = sysfs_get(sd);
-
- /* protect sd->s_dentry against sysfs_d_iput */
- spin_lock(&sysfs_assoc_lock);
- sd->s_dentry = dentry;
- spin_unlock(&sysfs_assoc_lock);
-
- d_rehash(dentry);
-}
-
static int sysfs_ilookup_test(struct inode *inode, void *arg)
{
struct sysfs_dirent *sd = arg;
@@ -480,10 +403,8 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
* @sd: sysfs_dirent to be added
*
* Get @acxt->parent_sd and set sd->s_parent to it and increment
- * nlink of parent inode if @sd is a directory. @sd is NOT
- * linked into the children list of the parent. The caller
- * should invoke sysfs_link_sibling() after this function
- * completes if @sd needs to be on the children list.
+ * nlink of parent inode if @sd is a directory and link into the
+ * children list of the parent.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -491,15 +412,30 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
*
* LOCKING:
* Determined by sysfs_addrm_start().
+ *
+ * RETURNS:
+ * 0 on success, -EEXIST if entry with the given name already
+ * exists.
*/
-void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
+ if (sysfs_find_dirent(acxt->parent_sd, sd->s_name)) {
+ printk(KERN_WARNING "sysfs: duplicate filename '%s' "
+ "can not be created\n", sd->s_name);
+ WARN_ON(1);
+ return -EEXIST;
+ }
+
sd->s_parent = sysfs_get(acxt->parent_sd);
if (sysfs_type(sd) == SYSFS_DIR && acxt->parent_inode)
inc_nlink(acxt->parent_inode);
acxt->cnt++;
+
+ sysfs_link_sibling(sd);
+
+ return 0;
}
/**
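With the change above, sysfs_add_one() rejects duplicate names itself (returning -EEXIST after a scan of the parent) and also links the new entry into the sibling list, so callers such as create_dir() no longer need the separate find/link sequence. A minimal sketch of that add-or-EEXIST contract, using an invented entry type rather than the kernel structures:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct entry {
        const char *name;
        struct entry *sibling;
};

/* Reject duplicates with -EEXIST and link the entry in one step. */
static int add_one(struct entry **head, struct entry *nd)
{
        struct entry *it;

        for (it = *head; it; it = it->sibling)
                if (!strcmp(it->name, nd->name))
                        return -EEXIST;

        nd->sibling = *head;
        *head = nd;
        return 0;
}

int main(void)
{
        struct entry a = { "power", NULL }, b = { "power", NULL };
        struct entry *head = NULL;

        printf("first add:  %d\n", add_one(&head, &a));   /* 0 */
        printf("second add: %d\n", add_one(&head, &b));   /* -EEXIST */
        return 0;
}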
@@ -508,9 +444,7 @@ void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* @sd: sysfs_dirent to be added
*
* Mark @sd removed and drop nlink of parent inode if @sd is a
- * directory. @sd is NOT unlinked from the children list of the
- * parent. The caller is repsonsible for removing @sd from the
- * children list before calling this function.
+ * directory. @sd is unlinked from the children list.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -521,7 +455,9 @@ void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
*/
void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
- BUG_ON(sd->s_sibling || (sd->s_flags & SYSFS_FLAG_REMOVED));
+ BUG_ON(sd->s_flags & SYSFS_FLAG_REMOVED);
+
+ sysfs_unlink_sibling(sd);
sd->s_flags |= SYSFS_FLAG_REMOVED;
sd->s_sibling = acxt->removed;
@@ -540,53 +476,49 @@ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* Drop dentry for @sd. @sd must have been unlinked from its
* parent on entry to this function such that it can't be looked
* up anymore.
- *
- * @sd->s_dentry which is protected with sysfs_assoc_lock points
- * to the currently associated dentry but we're not holding a
- * reference to it and racing with dput(). Grab dcache_lock and
- * verify dentry before dropping it. If @sd->s_dentry is NULL or
- * dput() beats us, no need to bother.
*/
static void sysfs_drop_dentry(struct sysfs_dirent *sd)
{
- struct dentry *dentry = NULL;
struct inode *inode;
+ struct dentry *dentry;
- /* We're not holding a reference to ->s_dentry dentry but the
- * field will stay valid as long as sysfs_assoc_lock is held.
+ inode = ilookup(sysfs_sb, sd->s_ino);
+ if (!inode)
+ return;
+
+ /* Drop any existing dentries associated with sd.
+ *
+ * For the dentry to be properly freed we need to grab a
+ * reference to the dentry under the dcache lock, unhash it,
+ * and then put it. Manipulating the dentry count this way lets
+ * dput() free the dentry immediately if it is not in use.
*/
- spin_lock(&sysfs_assoc_lock);
+repeat:
spin_lock(&dcache_lock);
-
- /* drop dentry if it's there and dput() didn't kill it yet */
- if (sd->s_dentry && sd->s_dentry->d_inode) {
- dentry = dget_locked(sd->s_dentry);
+ list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ if (d_unhashed(dentry))
+ continue;
+ dget_locked(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+ dput(dentry);
+ goto repeat;
}
-
spin_unlock(&dcache_lock);
- spin_unlock(&sysfs_assoc_lock);
-
- /* dentries for shadowed inodes are pinned, unpin */
- if (dentry && sysfs_is_shadowed_inode(dentry->d_inode))
- dput(dentry);
- dput(dentry);
/* adjust nlink and update timestamp */
- inode = ilookup(sysfs_sb, sd->s_ino);
- if (inode) {
- mutex_lock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
- inode->i_ctime = CURRENT_TIME;
+ inode->i_ctime = CURRENT_TIME;
+ drop_nlink(inode);
+ if (sysfs_type(sd) == SYSFS_DIR)
drop_nlink(inode);
- if (sysfs_type(sd) == SYSFS_DIR)
- drop_nlink(inode);
- mutex_unlock(&inode->i_mutex);
- iput(inode);
- }
+ mutex_unlock(&inode->i_mutex);
+
+ iput(inode);
}
/**
@@ -599,11 +531,8 @@ static void sysfs_drop_dentry(struct sysfs_dirent *sd)
*
* LOCKING:
* All mutexes acquired by sysfs_addrm_start() are released.
- *
- * RETURNS:
- * Number of added/removed sysfs_dirents since sysfs_addrm_start().
*/
-int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
+void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
{
/* release resources acquired by sysfs_addrm_start() */
mutex_unlock(&sysfs_mutex);
@@ -629,8 +558,6 @@ int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
sysfs_deactivate(sd);
sysfs_put(sd);
}
-
- return acxt->cnt;
}
/**
@@ -651,8 +578,8 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
{
struct sysfs_dirent *sd;
- for (sd = parent_sd->s_children; sd; sd = sd->s_sibling)
- if (sysfs_type(sd) && !strcmp(sd->s_name, name))
+ for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling)
+ if (!strcmp(sd->s_name, name))
return sd;
return NULL;
}
@@ -690,28 +617,25 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
+ int rc;
/* allocate */
sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
if (!sd)
return -ENOMEM;
- sd->s_elem.dir.kobj = kobj;
+ sd->s_dir.kobj = kobj;
/* link in */
sysfs_addrm_start(&acxt, parent_sd);
+ rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
- if (!sysfs_find_dirent(parent_sd, name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
-
- if (!sysfs_addrm_finish(&acxt)) {
+ if (rc == 0)
+ *p_sd = sd;
+ else
sysfs_put(sd);
- return -EEXIST;
- }
- *p_sd = sd;
- return 0;
+ return rc;
}
int sysfs_create_subdir(struct kobject *kobj, const char *name,
@@ -723,24 +647,18 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
/**
* sysfs_create_dir - create a directory for an object.
* @kobj: object we're creating directory for.
- * @shadow_parent: parent object.
*/
-int sysfs_create_dir(struct kobject *kobj,
- struct sysfs_dirent *shadow_parent_sd)
+int sysfs_create_dir(struct kobject * kobj)
{
struct sysfs_dirent *parent_sd, *sd;
int error = 0;
BUG_ON(!kobj);
- if (shadow_parent_sd)
- parent_sd = shadow_parent_sd;
- else if (kobj->parent)
+ if (kobj->parent)
parent_sd = kobj->parent->sd;
- else if (sysfs_mount && sysfs_mount->mnt_sb)
- parent_sd = sysfs_mount->mnt_sb->s_root->d_fsdata;
else
- return -EFAULT;
+ parent_sd = &sysfs_root;
error = create_dir(kobj, parent_sd, kobject_name(kobj), &sd);
if (!error)
@@ -748,39 +666,20 @@ int sysfs_create_dir(struct kobject *kobj,
return error;
}
-static int sysfs_count_nlink(struct sysfs_dirent *sd)
-{
- struct sysfs_dirent *child;
- int nr = 0;
-
- for (child = sd->s_children; child; child = child->s_sibling)
- if (sysfs_type(child) == SYSFS_DIR)
- nr++;
- return nr + 2;
-}
-
static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct dentry *ret = NULL;
- struct sysfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
- struct sysfs_dirent * sd;
- struct bin_attribute *bin_attr;
+ struct sysfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
+ struct sysfs_dirent *sd;
struct inode *inode;
- int found = 0;
mutex_lock(&sysfs_mutex);
- for (sd = parent_sd->s_children; sd; sd = sd->s_sibling) {
- if (sysfs_type(sd) &&
- !strcmp(sd->s_name, dentry->d_name.name)) {
- found = 1;
- break;
- }
- }
+ sd = sysfs_find_dirent(parent_sd, dentry->d_name.name);
/* no such entry */
- if (!found)
+ if (!sd)
goto out_unlock;
/* attach dentry and inode */
@@ -790,33 +689,11 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- if (inode->i_state & I_NEW) {
- /* initialize inode according to type */
- switch (sysfs_type(sd)) {
- case SYSFS_DIR:
- inode->i_op = &sysfs_dir_inode_operations;
- inode->i_fop = &sysfs_dir_operations;
- inode->i_nlink = sysfs_count_nlink(sd);
- break;
- case SYSFS_KOBJ_ATTR:
- inode->i_size = PAGE_SIZE;
- inode->i_fop = &sysfs_file_operations;
- break;
- case SYSFS_KOBJ_BIN_ATTR:
- bin_attr = sd->s_elem.bin_attr.bin_attr;
- inode->i_size = bin_attr->size;
- inode->i_fop = &bin_fops;
- break;
- case SYSFS_KOBJ_LINK:
- inode->i_op = &sysfs_symlink_inode_operations;
- break;
- default:
- BUG();
- }
- }
-
- sysfs_instantiate(dentry, inode);
- sysfs_attach_dentry(sd, dentry);
+ /* instantiate and hash dentry */
+ dentry->d_op = &sysfs_dentry_ops;
+ dentry->d_fsdata = sysfs_get(sd);
+ d_instantiate(dentry, inode);
+ d_rehash(dentry);
out_unlock:
mutex_unlock(&sysfs_mutex);
@@ -833,7 +710,6 @@ static void remove_dir(struct sysfs_dirent *sd)
struct sysfs_addrm_cxt acxt;
sysfs_addrm_start(&acxt, sd->s_parent);
- sysfs_unlink_sibling(sd);
sysfs_remove_one(&acxt, sd);
sysfs_addrm_finish(&acxt);
}
@@ -854,15 +730,13 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
pr_debug("sysfs %s: removing dir\n", dir_sd->s_name);
sysfs_addrm_start(&acxt, dir_sd);
- pos = &dir_sd->s_children;
+ pos = &dir_sd->s_dir.children;
while (*pos) {
struct sysfs_dirent *sd = *pos;
- if (sysfs_type(sd) && sysfs_type(sd) != SYSFS_DIR) {
- *pos = sd->s_sibling;
- sd->s_sibling = NULL;
+ if (sysfs_type(sd) != SYSFS_DIR)
sysfs_remove_one(&acxt, sd);
- } else
+ else
pos = &(*pos)->s_sibling;
}
sysfs_addrm_finish(&acxt);
@@ -890,90 +764,68 @@ void sysfs_remove_dir(struct kobject * kobj)
__sysfs_remove_dir(sd);
}
-int sysfs_rename_dir(struct kobject *kobj, struct sysfs_dirent *new_parent_sd,
- const char *new_name)
+int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
{
struct sysfs_dirent *sd = kobj->sd;
- struct dentry *new_parent = NULL;
+ struct dentry *parent = NULL;
struct dentry *old_dentry = NULL, *new_dentry = NULL;
const char *dup_name = NULL;
int error;
- /* get dentries */
+ mutex_lock(&sysfs_rename_mutex);
+
+ error = 0;
+ if (strcmp(sd->s_name, new_name) == 0)
+ goto out; /* nothing to rename */
+
+ /* get the original dentry */
old_dentry = sysfs_get_dentry(sd);
if (IS_ERR(old_dentry)) {
error = PTR_ERR(old_dentry);
- goto out_dput;
- }
-
- new_parent = sysfs_get_dentry(new_parent_sd);
- if (IS_ERR(new_parent)) {
- error = PTR_ERR(new_parent);
- goto out_dput;
+ goto out;
}
- /* lock new_parent and get dentry for new name */
- mutex_lock(&new_parent->d_inode->i_mutex);
+ parent = old_dentry->d_parent;
- new_dentry = lookup_one_len(new_name, new_parent, strlen(new_name));
- if (IS_ERR(new_dentry)) {
- error = PTR_ERR(new_dentry);
- goto out_unlock;
- }
+ /* lock parent and get dentry for new name */
+ mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&sysfs_mutex);
- /* By allowing two different directories with the same
- * d_parent we allow this routine to move between different
- * shadows of the same directory
- */
- error = -EINVAL;
- if (old_dentry->d_parent->d_inode != new_parent->d_inode ||
- new_dentry->d_parent->d_inode != new_parent->d_inode ||
- old_dentry == new_dentry)
+ error = -EEXIST;
+ if (sysfs_find_dirent(sd->s_parent, new_name))
goto out_unlock;
- error = -EEXIST;
- if (new_dentry->d_inode)
+ error = -ENOMEM;
+ new_dentry = d_alloc_name(parent, new_name);
+ if (!new_dentry)
goto out_unlock;
/* rename kobject and sysfs_dirent */
error = -ENOMEM;
new_name = dup_name = kstrdup(new_name, GFP_KERNEL);
if (!new_name)
- goto out_drop;
+ goto out_unlock;
error = kobject_set_name(kobj, "%s", new_name);
if (error)
- goto out_drop;
-
- mutex_lock(&sysfs_mutex);
+ goto out_unlock;
dup_name = sd->s_name;
sd->s_name = new_name;
- /* move under the new parent */
+ /* rename */
d_add(new_dentry, NULL);
- d_move(sd->s_dentry, new_dentry);
-
- sysfs_unlink_sibling(sd);
- sysfs_get(new_parent_sd);
- sysfs_put(sd->s_parent);
- sd->s_parent = new_parent_sd;
- sysfs_link_sibling(sd);
-
- mutex_unlock(&sysfs_mutex);
+ d_move(old_dentry, new_dentry);
error = 0;
- goto out_unlock;
-
- out_drop:
- d_drop(new_dentry);
out_unlock:
- mutex_unlock(&new_parent->d_inode->i_mutex);
- out_dput:
+ mutex_unlock(&sysfs_mutex);
+ mutex_unlock(&parent->d_inode->i_mutex);
kfree(dup_name);
- dput(new_parent);
dput(old_dentry);
dput(new_dentry);
+ out:
+ mutex_unlock(&sysfs_rename_mutex);
return error;
}
@@ -985,96 +837,69 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
struct dentry *old_dentry = NULL, *new_dentry = NULL;
int error;
+ mutex_lock(&sysfs_rename_mutex);
BUG_ON(!sd->s_parent);
new_parent_sd = new_parent_kobj->sd ? new_parent_kobj->sd : &sysfs_root;
+ error = 0;
+ if (sd->s_parent == new_parent_sd)
+ goto out; /* nothing to move */
+
/* get dentries */
old_dentry = sysfs_get_dentry(sd);
if (IS_ERR(old_dentry)) {
error = PTR_ERR(old_dentry);
- goto out_dput;
+ goto out;
}
- old_parent = sd->s_parent->s_dentry;
+ old_parent = old_dentry->d_parent;
new_parent = sysfs_get_dentry(new_parent_sd);
if (IS_ERR(new_parent)) {
error = PTR_ERR(new_parent);
- goto out_dput;
+ goto out;
}
- if (old_parent->d_inode == new_parent->d_inode) {
- error = 0;
- goto out_dput; /* nothing to move */
- }
again:
mutex_lock(&old_parent->d_inode->i_mutex);
if (!mutex_trylock(&new_parent->d_inode->i_mutex)) {
mutex_unlock(&old_parent->d_inode->i_mutex);
goto again;
}
+ mutex_lock(&sysfs_mutex);
- new_dentry = lookup_one_len(kobj->name, new_parent, strlen(kobj->name));
- if (IS_ERR(new_dentry)) {
- error = PTR_ERR(new_dentry);
+ error = -EEXIST;
+ if (sysfs_find_dirent(new_parent_sd, sd->s_name))
goto out_unlock;
- } else
- error = 0;
+
+ error = -ENOMEM;
+ new_dentry = d_alloc_name(new_parent, sd->s_name);
+ if (!new_dentry)
+ goto out_unlock;
+
+ error = 0;
d_add(new_dentry, NULL);
- d_move(sd->s_dentry, new_dentry);
+ d_move(old_dentry, new_dentry);
dput(new_dentry);
/* Remove from old parent's list and insert into new parent's list. */
- mutex_lock(&sysfs_mutex);
-
sysfs_unlink_sibling(sd);
sysfs_get(new_parent_sd);
sysfs_put(sd->s_parent);
sd->s_parent = new_parent_sd;
sysfs_link_sibling(sd);
- mutex_unlock(&sysfs_mutex);
-
out_unlock:
+ mutex_unlock(&sysfs_mutex);
mutex_unlock(&new_parent->d_inode->i_mutex);
mutex_unlock(&old_parent->d_inode->i_mutex);
- out_dput:
+ out:
dput(new_parent);
dput(old_dentry);
dput(new_dentry);
+ mutex_unlock(&sysfs_rename_mutex);
return error;
}
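
sysfs_move_dir() above has to hold the i_mutex of both the old and the new parent directory, and there is no natural ordering between two arbitrary inodes, so it keeps the lock/trylock/back-off loop: take the first mutex, try the second, and if the trylock fails drop the first and start again. A compilable user-space sketch of the same idiom with pthreads (the function name and the sched_yield() back-off are illustrative, not part of the kernel code):

#include <pthread.h>
#include <sched.h>

/*
 * Acquire two mutexes that have no agreed locking order without risking
 * an ABBA deadlock: hold the first, *try* the second, and back off
 * completely if the trylock fails.
 */
static void lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
	for (;;) {
		pthread_mutex_lock(a);
		if (pthread_mutex_trylock(b) == 0)
			return;			/* got both */
		pthread_mutex_unlock(a);	/* let the other side progress */
		sched_yield();
	}
}
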
-static int sysfs_dir_open(struct inode *inode, struct file *file)
-{
- struct dentry * dentry = file->f_path.dentry;
- struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- struct sysfs_dirent * sd;
-
- sd = sysfs_new_dirent("_DIR_", 0, 0);
- if (sd) {
- mutex_lock(&sysfs_mutex);
- sd->s_parent = sysfs_get(parent_sd);
- sysfs_link_sibling(sd);
- mutex_unlock(&sysfs_mutex);
- }
-
- file->private_data = sd;
- return sd ? 0 : -ENOMEM;
-}
-
-static int sysfs_dir_close(struct inode *inode, struct file *file)
-{
- struct sysfs_dirent * cursor = file->private_data;
-
- mutex_lock(&sysfs_mutex);
- sysfs_unlink_sibling(cursor);
- mutex_unlock(&sysfs_mutex);
-
- release_sysfs_dirent(cursor);
-
- return 0;
-}
-
/* Relationship between s_mode and the DT_xxx types */
static inline unsigned char dt_type(struct sysfs_dirent *sd)
{
@@ -1085,232 +910,51 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct dentry *dentry = filp->f_path.dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- struct sysfs_dirent *cursor = filp->private_data;
- struct sysfs_dirent **pos;
+ struct sysfs_dirent *pos;
ino_t ino;
- int i = filp->f_pos;
- switch (i) {
- case 0:
- ino = parent_sd->s_ino;
- if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
- break;
+ if (filp->f_pos == 0) {
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
filp->f_pos++;
- i++;
- /* fallthrough */
- case 1:
- if (parent_sd->s_parent)
- ino = parent_sd->s_parent->s_ino;
- else
- ino = parent_sd->s_ino;
- if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
- break;
+ }
+ if (filp->f_pos == 1) {
+ if (parent_sd->s_parent)
+ ino = parent_sd->s_parent->s_ino;
+ else
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
filp->f_pos++;
- i++;
- /* fallthrough */
- default:
- mutex_lock(&sysfs_mutex);
-
- pos = &parent_sd->s_children;
- while (*pos != cursor)
- pos = &(*pos)->s_sibling;
-
- /* unlink cursor */
- *pos = cursor->s_sibling;
-
- if (filp->f_pos == 2)
- pos = &parent_sd->s_children;
-
- for ( ; *pos; pos = &(*pos)->s_sibling) {
- struct sysfs_dirent *next = *pos;
- const char * name;
- int len;
-
- if (!sysfs_type(next))
- continue;
-
- name = next->s_name;
- len = strlen(name);
- ino = next->s_ino;
-
- if (filldir(dirent, name, len, filp->f_pos, ino,
- dt_type(next)) < 0)
- break;
+ }
+ if ((filp->f_pos > 1) && (filp->f_pos < INT_MAX)) {
+ mutex_lock(&sysfs_mutex);
- filp->f_pos++;
- }
+ /* Skip the dentries we have already reported */
+ pos = parent_sd->s_dir.children;
+ while (pos && (filp->f_pos > pos->s_ino))
+ pos = pos->s_sibling;
- /* put cursor back in */
- cursor->s_sibling = *pos;
- *pos = cursor;
+ for ( ; pos; pos = pos->s_sibling) {
+ const char * name;
+ int len;
- mutex_unlock(&sysfs_mutex);
- }
- return 0;
-}
-
-static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
-{
- struct dentry * dentry = file->f_path.dentry;
+ name = pos->s_name;
+ len = strlen(name);
+ filp->f_pos = ino = pos->s_ino;
- switch (origin) {
- case 1:
- offset += file->f_pos;
- case 0:
- if (offset >= 0)
+ if (filldir(dirent, name, len, filp->f_pos, ino,
+ dt_type(pos)) < 0)
break;
- default:
- return -EINVAL;
- }
- if (offset != file->f_pos) {
- mutex_lock(&sysfs_mutex);
-
- file->f_pos = offset;
- if (file->f_pos >= 2) {
- struct sysfs_dirent *sd = dentry->d_fsdata;
- struct sysfs_dirent *cursor = file->private_data;
- struct sysfs_dirent **pos;
- loff_t n = file->f_pos - 2;
-
- sysfs_unlink_sibling(cursor);
-
- pos = &sd->s_children;
- while (n && *pos) {
- struct sysfs_dirent *next = *pos;
- if (sysfs_type(next))
- n--;
- pos = &(*pos)->s_sibling;
- }
-
- cursor->s_sibling = *pos;
- *pos = cursor;
}
-
+ if (!pos)
+ filp->f_pos = INT_MAX;
mutex_unlock(&sysfs_mutex);
}
-
- return offset;
-}
-
-
-/**
- * sysfs_make_shadowed_dir - Setup so a directory can be shadowed
- * @kobj: object we're creating shadow of.
- */
-
-int sysfs_make_shadowed_dir(struct kobject *kobj,
- void * (*follow_link)(struct dentry *, struct nameidata *))
-{
- struct dentry *dentry;
- struct inode *inode;
- struct inode_operations *i_op;
-
- /* get dentry for @kobj->sd, dentry of a shadowed dir is pinned */
- dentry = sysfs_get_dentry(kobj->sd);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
-
- inode = dentry->d_inode;
- if (inode->i_op != &sysfs_dir_inode_operations) {
- dput(dentry);
- return -EINVAL;
- }
-
- i_op = kmalloc(sizeof(*i_op), GFP_KERNEL);
- if (!i_op)
- return -ENOMEM;
-
- memcpy(i_op, &sysfs_dir_inode_operations, sizeof(*i_op));
- i_op->follow_link = follow_link;
-
- /* Locking of inode->i_op?
- * Since setting i_op is a single word write and they
- * are atomic we should be ok here.
- */
- inode->i_op = i_op;
return 0;
}
-/**
- * sysfs_create_shadow_dir - create a shadow directory for an object.
- * @kobj: object we're creating directory for.
- *
- * sysfs_make_shadowed_dir must already have been called on this
- * directory.
- */
-
-struct sysfs_dirent *sysfs_create_shadow_dir(struct kobject *kobj)
-{
- struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
- struct dentry *dir, *parent, *shadow;
- struct inode *inode;
- struct sysfs_dirent *sd;
- struct sysfs_addrm_cxt acxt;
-
- dir = sysfs_get_dentry(kobj->sd);
- if (IS_ERR(dir)) {
- sd = (void *)dir;
- goto out;
- }
- parent = dir->d_parent;
-
- inode = dir->d_inode;
- sd = ERR_PTR(-EINVAL);
- if (!sysfs_is_shadowed_inode(inode))
- goto out_dput;
-
- shadow = d_alloc(parent, &dir->d_name);
- if (!shadow)
- goto nomem;
-
- sd = sysfs_new_dirent("_SHADOW_", inode->i_mode, SYSFS_DIR);
- if (!sd)
- goto nomem;
- sd->s_elem.dir.kobj = kobj;
-
- sysfs_addrm_start(&acxt, parent_sd);
-
- /* add but don't link into children list */
- sysfs_add_one(&acxt, sd);
-
- /* attach and instantiate dentry */
- sysfs_attach_dentry(sd, shadow);
- d_instantiate(shadow, igrab(inode));
- inc_nlink(inode); /* tj: synchronization? */
-
- sysfs_addrm_finish(&acxt);
-
- dget(shadow); /* Extra count - pin the dentry in core */
-
- goto out_dput;
-
- nomem:
- dput(shadow);
- sd = ERR_PTR(-ENOMEM);
- out_dput:
- dput(dir);
- out:
- return sd;
-}
-
-/**
- * sysfs_remove_shadow_dir - remove an object's directory.
- * @shadow_sd: sysfs_dirent of shadow directory
- *
- * The only thing special about this is that we remove any files in
- * the directory before we remove the directory, and we've inlined
- * what used to be sysfs_rmdir() below, instead of calling separately.
- */
-
-void sysfs_remove_shadow_dir(struct sysfs_dirent *shadow_sd)
-{
- __sysfs_remove_dir(shadow_sd);
-}
const struct file_operations sysfs_dir_operations = {
- .open = sysfs_dir_open,
- .release = sysfs_dir_close,
- .llseek = sysfs_dir_lseek,
.read = generic_read_dir,
.readdir = sysfs_readdir,
};
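
The new sysfs_readdir() above drops the per-open cursor entirely: f_pos is 0 for ".", 1 for "..", and beyond that it is simply the s_ino of the entry being handed to filldir, so a later call resumes by walking the children list and skipping the leading entries whose inode number is below the saved position, and INT_MAX marks end-of-directory. A small self-contained model of that resume logic (plain C, hypothetical types; the real code skips only strictly-smaller inodes so that an entry rejected by filldir is retried):

#include <stdio.h>

struct dent {
	unsigned long ino;		/* stands in for sysfs_dirent->s_ino */
	const char *name;
	struct dent *sibling;
};

/* Emit up to @budget entries, remembering progress in *@pos (the f_pos
 * analogue).  Returns the number of entries emitted by this call. */
static int emit_some(struct dent *children, unsigned long *pos, int budget)
{
	struct dent *p = children;
	int n = 0;

	/* skip what earlier calls already reported */
	while (p && *pos >= p->ino)
		p = p->sibling;

	for (; p && n < budget; p = p->sibling, n++) {
		printf("%lu %s\n", p->ino, p->name);
		*pos = p->ino;		/* resume point for the next call */
	}
	return n;
}

int main(void)
{
	struct dent c = { 5, "gamma", NULL };
	struct dent b = { 4, "beta", &c };
	struct dent a = { 3, "alpha", &b };
	unsigned long pos = 2;		/* "." and ".." already returned */

	while (emit_some(&a, &pos, 2) > 0)
		;			/* each call resumes from *pos */
	return 0;
}
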
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 3e1cc062a74..d3be1e7fb48 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -1,15 +1,22 @@
/*
- * file.c - operations for regular (text) files.
+ * fs/sysfs/file.c - sysfs regular (text) file implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#include <linux/module.h>
-#include <linux/fsnotify.h>
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
@@ -50,14 +57,33 @@ static struct sysfs_ops subsys_sysfs_ops = {
.store = subsys_attr_store,
};
+/*
+ * There's one sysfs_buffer for each open file and one
+ * sysfs_open_dirent for each sysfs_dirent with one or more open
+ * files.
+ *
+ * filp->private_data points to sysfs_buffer and
+ * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open
+ * is protected by sysfs_open_dirent_lock.
+ */
+static spinlock_t sysfs_open_dirent_lock = SPIN_LOCK_UNLOCKED;
+
+struct sysfs_open_dirent {
+ atomic_t refcnt;
+ atomic_t event;
+ wait_queue_head_t poll;
+ struct list_head buffers; /* goes through sysfs_buffer.list */
+};
+
struct sysfs_buffer {
size_t count;
loff_t pos;
char * page;
struct sysfs_ops * ops;
- struct semaphore sem;
+ struct mutex mutex;
int needs_read_fill;
int event;
+ struct list_head list;
};
/**
@@ -74,7 +100,7 @@ struct sysfs_buffer {
static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_ops * ops = buffer->ops;
int ret = 0;
ssize_t count;
@@ -88,8 +114,8 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
- buffer->event = atomic_read(&attr_sd->s_event);
- count = ops->show(kobj, attr_sd->s_elem.attr.attr, buffer->page);
+ buffer->event = atomic_read(&attr_sd->s_attr.open->event);
+ count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
sysfs_put_active_two(attr_sd);
@@ -128,7 +154,7 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct sysfs_buffer * buffer = file->private_data;
ssize_t retval = 0;
- down(&buffer->sem);
+ mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
retval = fill_read_buffer(file->f_path.dentry,buffer);
if (retval)
@@ -139,7 +165,7 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
buffer->count);
out:
- up(&buffer->sem);
+ mutex_unlock(&buffer->mutex);
return retval;
}
@@ -189,7 +215,7 @@ static int
flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_ops * ops = buffer->ops;
int rc;
@@ -197,7 +223,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
- rc = ops->store(kobj, attr_sd->s_elem.attr.attr, buffer->page, count);
+ rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
sysfs_put_active_two(attr_sd);
@@ -228,20 +254,102 @@ sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t
struct sysfs_buffer * buffer = file->private_data;
ssize_t len;
- down(&buffer->sem);
+ mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
len = flush_write_buffer(file->f_path.dentry, buffer, len);
if (len > 0)
*ppos += len;
- up(&buffer->sem);
+ mutex_unlock(&buffer->mutex);
return len;
}
+/**
+ * sysfs_get_open_dirent - get or create sysfs_open_dirent
+ * @sd: target sysfs_dirent
+ * @buffer: sysfs_buffer for this instance of open
+ *
+ * If @sd->s_attr.open exists, increment its reference count;
+ * otherwise, create one. @buffer is chained to the buffers
+ * list.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
+ struct sysfs_buffer *buffer)
+{
+ struct sysfs_open_dirent *od, *new_od = NULL;
+
+ retry:
+ spin_lock(&sysfs_open_dirent_lock);
+
+ if (!sd->s_attr.open && new_od) {
+ sd->s_attr.open = new_od;
+ new_od = NULL;
+ }
+
+ od = sd->s_attr.open;
+ if (od) {
+ atomic_inc(&od->refcnt);
+ list_add_tail(&buffer->list, &od->buffers);
+ }
+
+ spin_unlock(&sysfs_open_dirent_lock);
+
+ if (od) {
+ kfree(new_od);
+ return 0;
+ }
+
+ /* not there, initialize a new one and retry */
+ new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
+ if (!new_od)
+ return -ENOMEM;
+
+ atomic_set(&new_od->refcnt, 0);
+ atomic_set(&new_od->event, 1);
+ init_waitqueue_head(&new_od->poll);
+ INIT_LIST_HEAD(&new_od->buffers);
+ goto retry;
+}
+
+/**
+ * sysfs_put_open_dirent - put sysfs_open_dirent
+ * @sd: target sysfs_dirent
+ * @buffer: associated sysfs_buffer
+ *
+ * Put @sd->s_attr.open and unlink @buffer from the buffers list.
+ * If reference count reaches zero, disassociate and free it.
+ *
+ * LOCKING:
+ * None.
+ */
+static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
+ struct sysfs_buffer *buffer)
+{
+ struct sysfs_open_dirent *od = sd->s_attr.open;
+
+ spin_lock(&sysfs_open_dirent_lock);
+
+ list_del(&buffer->list);
+ if (atomic_dec_and_test(&od->refcnt))
+ sd->s_attr.open = NULL;
+ else
+ od = NULL;
+
+ spin_unlock(&sysfs_open_dirent_lock);
+
+ kfree(od);
+}
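+
+/* sysfs_get_open_dirent() is a textbook optimistic-allocation loop: look for
+ * the shared object under the spinlock, and if it is missing, drop the lock,
+ * allocate a candidate (kmalloc() may sleep and therefore cannot run under a
+ * spinlock) and retry; whoever loses the race just frees the unused candidate.
+ * A stripped-down user-space rendering of the same pattern, with invented
+ * types and names, purely as a sketch:
+ *
+ *	#include <pthread.h>
+ *	#include <stdlib.h>
+ *
+ *	struct shared { int refcnt; };
+ *
+ *	struct owner {
+ *		pthread_spinlock_t lock;   // sysfs_open_dirent_lock analogue
+ *		struct shared *open;       // lazily created, like s_attr.open
+ *	};
+ *
+ *	static struct shared *get_shared(struct owner *o)
+ *	{
+ *		struct shared *sh, *candidate = NULL;
+ *	retry:
+ *		pthread_spin_lock(&o->lock);
+ *		if (!o->open && candidate) {
+ *			o->open = candidate;   // our candidate won the race
+ *			candidate = NULL;
+ *		}
+ *		sh = o->open;
+ *		if (sh)
+ *			sh->refcnt++;
+ *		pthread_spin_unlock(&o->lock);
+ *
+ *		if (sh) {
+ *			free(candidate);       // lost the race, or never raced
+ *			return sh;
+ *		}
+ *		// nothing installed yet: allocate outside the lock and retry
+ *		candidate = calloc(1, sizeof(*candidate));
+ *		if (!candidate)
+ *			return NULL;
+ *		goto retry;
+ *	}
+ */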
+
static int sysfs_open_file(struct inode *inode, struct file *file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_buffer * buffer;
struct sysfs_ops * ops = NULL;
int error;
@@ -294,33 +402,38 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
if (!buffer)
goto err_out;
- init_MUTEX(&buffer->sem);
+ mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->ops = ops;
file->private_data = buffer;
- /* open succeeded, put active references and pin attr_sd */
+ /* make sure we have open dirent struct */
+ error = sysfs_get_open_dirent(attr_sd, buffer);
+ if (error)
+ goto err_free;
+
+ /* open succeeded, put active references */
sysfs_put_active_two(attr_sd);
- sysfs_get(attr_sd);
return 0;
+ err_free:
+ kfree(buffer);
err_out:
sysfs_put_active_two(attr_sd);
return error;
}
-static int sysfs_release(struct inode * inode, struct file * filp)
+static int sysfs_release(struct inode *inode, struct file *filp)
{
- struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
+ struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
struct sysfs_buffer *buffer = filp->private_data;
- sysfs_put(attr_sd);
+ sysfs_put_open_dirent(sd, buffer);
+
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ kfree(buffer);
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- kfree(buffer);
- }
return 0;
}
@@ -335,24 +448,24 @@ static int sysfs_release(struct inode * inode, struct file * filp)
* again will not get new data, or reset the state of 'poll'.
* Reminder: this only works for attributes which actively support
* it, and it is not possible to test an attribute from userspace
- * to see if it supports poll (Nether 'poll' or 'select' return
+ * to see if it supports poll (Neither 'poll' nor 'select' return
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
struct sysfs_buffer * buffer = filp->private_data;
struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct sysfs_open_dirent *od = attr_sd->s_attr.open;
/* need parent for the kobj, grab both */
if (!sysfs_get_active_two(attr_sd))
goto trigger;
- poll_wait(filp, &kobj->poll, wait);
+ poll_wait(filp, &od->poll, wait);
sysfs_put_active_two(attr_sd);
- if (buffer->event != atomic_read(&attr_sd->s_event))
+ if (buffer->event != atomic_read(&od->event))
goto trigger;
return 0;
@@ -373,8 +486,17 @@ void sysfs_notify(struct kobject *k, char *dir, char *attr)
if (sd && attr)
sd = sysfs_find_dirent(sd, attr);
if (sd) {
- atomic_inc(&sd->s_event);
- wake_up_interruptible(&k->poll);
+ struct sysfs_open_dirent *od;
+
+ spin_lock(&sysfs_open_dirent_lock);
+
+ od = sd->s_attr.open;
+ if (od) {
+ atomic_inc(&od->event);
+ wake_up_interruptible(&od->poll);
+ }
+
+ spin_unlock(&sysfs_open_dirent_lock);
}
mutex_unlock(&sysfs_mutex);
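
Seen from user space the notification path keeps its documented shape: poll() or select() on an open sysfs attribute returns once the kernel calls sysfs_notify() on it, and the reader must then seek back to offset 0 and re-read the file (or close and reopen it) to fetch the new value and rearm the poll. A hedged example; the attribute path is hypothetical and must be replaced with one that actually supports notification:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical attribute path, for illustration only */
	const char *path = "/sys/class/example/device0/state";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		/* each poll must be preceded by a read so the event count is sampled */
		if (lseek(fd, 0, SEEK_SET) < 0)
			break;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("value: %s", buf);

		if (poll(&pfd, 1, -1) < 0)	/* blocks until sysfs_notify() */
			break;
	}
	close(fd);
	return 0;
}
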
@@ -397,25 +519,21 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
+ int rc;
sd = sysfs_new_dirent(attr->name, mode, type);
if (!sd)
return -ENOMEM;
- sd->s_elem.attr.attr = (void *)attr;
+ sd->s_attr.attr = (void *)attr;
sysfs_addrm_start(&acxt, dir_sd);
+ rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
- if (!sysfs_find_dirent(dir_sd, attr->name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
-
- if (!sysfs_addrm_finish(&acxt)) {
+ if (rc)
sysfs_put(sd);
- return -EEXIST;
- }
- return 0;
+ return rc;
}
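
With this series the duplicate check that callers used to perform with sysfs_find_dirent() has moved behind sysfs_add_one(), which now returns an error code, and sysfs_addrm_finish() no longer returns anything, so every creator collapses to the same short pattern seen here and again in sysfs_create_link() further down: start the add/remove context, attempt the add, finish the context, and drop the creator's reference if the add failed. Condensed, as a restatement of the code above rather than a new interface:

	struct sysfs_addrm_cxt acxt;
	int rc;

	sysfs_addrm_start(&acxt, parent_sd);	/* open add/remove context on the parent */
	rc = sysfs_add_one(&acxt, sd);		/* 0, or -errno (e.g. a duplicate name) */
	sysfs_addrm_finish(&acxt);		/* close the context */

	if (rc)
		sysfs_put(sd);			/* drop the creator's reference on failure */
	return rc;
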
@@ -457,42 +575,6 @@ int sysfs_add_file_to_group(struct kobject *kobj,
}
EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
-
-/**
- * sysfs_update_file - update the modified timestamp on an object attribute.
- * @kobj: object we're acting for.
- * @attr: attribute descriptor.
- */
-int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
-{
- struct sysfs_dirent *victim_sd = NULL;
- struct dentry *victim = NULL;
- int rc;
-
- rc = -ENOENT;
- victim_sd = sysfs_get_dirent(kobj->sd, attr->name);
- if (!victim_sd)
- goto out;
-
- victim = sysfs_get_dentry(victim_sd);
- if (IS_ERR(victim)) {
- rc = PTR_ERR(victim);
- victim = NULL;
- goto out;
- }
-
- mutex_lock(&victim->d_inode->i_mutex);
- victim->d_inode->i_mtime = CURRENT_TIME;
- fsnotify_modify(victim);
- mutex_unlock(&victim->d_inode->i_mutex);
- rc = 0;
- out:
- dput(victim);
- sysfs_put(victim_sd);
- return rc;
-}
-
-
/**
* sysfs_chmod_file - update the modified mode value on an object attribute.
* @kobj: object we're acting for.
@@ -513,7 +595,9 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
if (!victim_sd)
goto out;
+ mutex_lock(&sysfs_rename_mutex);
victim = sysfs_get_dentry(victim_sd);
+ mutex_unlock(&sysfs_rename_mutex);
if (IS_ERR(victim)) {
rc = PTR_ERR(victim);
victim = NULL;
@@ -521,10 +605,19 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
}
inode = victim->d_inode;
+
mutex_lock(&inode->i_mutex);
+
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
rc = notify_change(victim, &newattrs);
+
+ if (rc == 0) {
+ mutex_lock(&sysfs_mutex);
+ victim_sd->s_mode = newattrs.ia_mode;
+ mutex_unlock(&sysfs_mutex);
+ }
+
mutex_unlock(&inode->i_mutex);
out:
dput(victim);
@@ -632,4 +725,3 @@ EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
-EXPORT_SYMBOL_GPL(sysfs_update_file);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index f318b73c790..d1972374655 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -13,8 +13,6 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>
-#include <linux/fs.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 10d1b52899f..9236635111f 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -1,7 +1,11 @@
/*
- * inode.c - basic inode and dentry operations.
+ * fs/sysfs/inode.c - basic sysfs inode and dentry operations
*
- * sysfs is Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
*
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
@@ -14,7 +18,6 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
extern struct super_block * sysfs_sb;
@@ -34,16 +37,6 @@ static const struct inode_operations sysfs_inode_operations ={
.setattr = sysfs_setattr,
};
-void sysfs_delete_inode(struct inode *inode)
-{
- /* Free the shadowed directory inode operations */
- if (sysfs_is_shadowed_inode(inode)) {
- kfree(inode->i_op);
- inode->i_op = NULL;
- }
- return generic_delete_inode(inode);
-}
-
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
struct inode * inode = dentry->d_inode;
@@ -133,8 +126,22 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
*/
static struct lock_class_key sysfs_inode_imutex_key;
+static int sysfs_count_nlink(struct sysfs_dirent *sd)
+{
+ struct sysfs_dirent *child;
+ int nr = 0;
+
+ for (child = sd->s_dir.children; child; child = child->s_sibling)
+ if (sysfs_type(child) == SYSFS_DIR)
+ nr++;
+
+ return nr + 2;
+}
+
static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
+ struct bin_attribute *bin_attr;
+
inode->i_blocks = 0;
inode->i_mapping->a_ops = &sysfs_aops;
inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
@@ -150,6 +157,32 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
set_inode_attr(inode, sd->s_iattr);
} else
set_default_inode_attr(inode, sd->s_mode);
+
+
+ /* initialize inode according to type */
+ switch (sysfs_type(sd)) {
+ case SYSFS_DIR:
+ inode->i_op = &sysfs_dir_inode_operations;
+ inode->i_fop = &sysfs_dir_operations;
+ inode->i_nlink = sysfs_count_nlink(sd);
+ break;
+ case SYSFS_KOBJ_ATTR:
+ inode->i_size = PAGE_SIZE;
+ inode->i_fop = &sysfs_file_operations;
+ break;
+ case SYSFS_KOBJ_BIN_ATTR:
+ bin_attr = sd->s_bin_attr.bin_attr;
+ inode->i_size = bin_attr->size;
+ inode->i_fop = &bin_fops;
+ break;
+ case SYSFS_KOBJ_LINK:
+ inode->i_op = &sysfs_symlink_inode_operations;
+ break;
+ default:
+ BUG();
+ }
+
+ unlock_new_inode(inode);
}
/**
@@ -177,50 +210,24 @@ struct inode * sysfs_get_inode(struct sysfs_dirent *sd)
return inode;
}
-/**
- * sysfs_instantiate - instantiate dentry
- * @dentry: dentry to be instantiated
- * @inode: inode associated with @sd
- *
- * Unlock @inode if locked and instantiate @dentry with @inode.
- *
- * LOCKING:
- * None.
- */
-void sysfs_instantiate(struct dentry *dentry, struct inode *inode)
-{
- BUG_ON(!dentry || dentry->d_inode);
-
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
-
- d_instantiate(dentry, inode);
-}
-
int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
{
struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent **pos, *sd;
+ struct sysfs_dirent *sd;
if (!dir_sd)
return -ENOENT;
sysfs_addrm_start(&acxt, dir_sd);
- for (pos = &dir_sd->s_children; *pos; pos = &(*pos)->s_sibling) {
- sd = *pos;
-
- if (!sysfs_type(sd))
- continue;
- if (!strcmp(sd->s_name, name)) {
- *pos = sd->s_sibling;
- sd->s_sibling = NULL;
- sysfs_remove_one(&acxt, sd);
- break;
- }
- }
+ sd = sysfs_find_dirent(dir_sd, name);
+ if (sd)
+ sysfs_remove_one(&acxt, sd);
+
+ sysfs_addrm_finish(&acxt);
- if (sysfs_addrm_finish(&acxt))
+ if (sd)
return 0;
- return -ENOENT;
+ else
+ return -ENOENT;
}
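
The loop deleted above walked the children list through a pointer-to-pointer (struct sysfs_dirent **pos) so it could splice the matching node out in place; the new code simply asks sysfs_find_dirent() for the node and lets sysfs_remove_one() handle the unlinking. The idiom itself is worth noting, since it removes from a singly linked list without tracking a separate "previous" pointer or special-casing the head. A standalone illustration with hypothetical types:

#include <stddef.h>
#include <string.h>

struct ent {
	const char *name;
	struct ent *sibling;
};

/* Unlink and return the entry called @name, or NULL if it is absent.
 * @headp points at the list head, so removing the first entry needs
 * no special handling. */
static struct ent *unlink_by_name(struct ent **headp, const char *name)
{
	struct ent **pos;

	for (pos = headp; *pos; pos = &(*pos)->sibling) {
		if (!strcmp((*pos)->name, name)) {
			struct ent *found = *pos;

			*pos = found->sibling;	/* splice it out */
			found->sibling = NULL;
			return found;
		}
	}
	return NULL;
}
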
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index fbc7b65fe26..c76c540be3c 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -1,5 +1,13 @@
/*
- * mount.c - operations for initializing and mounting sysfs.
+ * fs/sysfs/mount.c - operations for initializing and mounting sysfs

+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#define DEBUG
@@ -8,25 +16,25 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
/* Random magic number */
#define SYSFS_MAGIC 0x62656572
-struct vfsmount *sysfs_mount;
+static struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
struct kmem_cache *sysfs_dir_cachep;
static const struct super_operations sysfs_ops = {
.statfs = simple_statfs,
- .drop_inode = sysfs_delete_inode,
+ .drop_inode = generic_delete_inode,
};
struct sysfs_dirent sysfs_root = {
+ .s_name = "",
.s_count = ATOMIC_INIT(1),
- .s_flags = SYSFS_ROOT,
+ .s_flags = SYSFS_DIR,
.s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
@@ -50,11 +58,6 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
}
- inode->i_op = &sysfs_dir_inode_operations;
- inode->i_fop = &sysfs_dir_operations;
- inc_nlink(inode); /* directory, account for "." */
- unlock_new_inode(inode);
-
/* instantiate and link root dentry */
root = d_alloc_root(inode);
if (!root) {
@@ -62,7 +65,6 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
iput(inode);
return -ENOMEM;
}
- sysfs_root.s_dentry = root;
root->d_fsdata = &sysfs_root;
sb->s_root = root;
return 0;
@@ -77,7 +79,7 @@ static int sysfs_get_sb(struct file_system_type *fs_type,
static struct file_system_type sysfs_fs_type = {
.name = "sysfs",
.get_sb = sysfs_get_sb,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
int __init sysfs_init(void)
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 4ce687f0b5d..3eac20c63c4 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -1,5 +1,13 @@
/*
- * symlink.c - operations for sysfs symlinks.
+ * fs/sysfs/symlink.c - sysfs symlink implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#include <linux/fs.h>
@@ -7,7 +15,7 @@
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/namei.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "sysfs.h"
@@ -60,10 +68,9 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
BUG_ON(!name);
- if (!kobj) {
- if (sysfs_mount && sysfs_mount->mnt_sb)
- parent_sd = sysfs_mount->mnt_sb->s_root->d_fsdata;
- } else
+ if (!kobj)
+ parent_sd = &sysfs_root;
+ else
parent_sd = kobj->sd;
error = -EFAULT;
@@ -87,20 +94,15 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
if (!sd)
goto out_put;
- sd->s_elem.symlink.target_sd = target_sd;
+ sd->s_symlink.target_sd = target_sd;
target_sd = NULL; /* reference is now owned by the symlink */
sysfs_addrm_start(&acxt, parent_sd);
+ error = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
- if (!sysfs_find_dirent(parent_sd, name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
-
- if (!sysfs_addrm_finish(&acxt)) {
- error = -EEXIST;
+ if (error)
goto out_put;
- }
return 0;
@@ -148,7 +150,7 @@ static int sysfs_getlink(struct dentry *dentry, char * path)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
struct sysfs_dirent *parent_sd = sd->s_parent;
- struct sysfs_dirent *target_sd = sd->s_elem.symlink.target_sd;
+ struct sysfs_dirent *target_sd = sd->s_symlink.target_sd;
int error;
mutex_lock(&sysfs_mutex);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6b8c8d76d30..f0326f281d1 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,20 +1,39 @@
+/*
+ * fs/sysfs/sysfs.h - sysfs internal header file
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+struct sysfs_open_dirent;
+
+/* type-specific structures for sysfs_dirent->s_* union members */
struct sysfs_elem_dir {
- struct kobject * kobj;
+ struct kobject *kobj;
+ /* children list starts here and goes through sd->s_sibling */
+ struct sysfs_dirent *children;
};
struct sysfs_elem_symlink {
- struct sysfs_dirent * target_sd;
+ struct sysfs_dirent *target_sd;
};
struct sysfs_elem_attr {
- struct attribute * attr;
+ struct attribute *attr;
+ struct sysfs_open_dirent *open;
};
struct sysfs_elem_bin_attr {
- struct bin_attribute * bin_attr;
+ struct bin_attribute *bin_attr;
};
/*
+ * sysfs_dirent - the building block of sysfs hierarchy. Each and
+ * every sysfs node is represented by single sysfs_dirent.
+ *
* As long as s_count reference is held, the sysfs_dirent itself is
* accessible. Dereferencing s_elem or any other outer entity
* requires s_active reference.
@@ -22,28 +41,43 @@ struct sysfs_elem_bin_attr {
struct sysfs_dirent {
atomic_t s_count;
atomic_t s_active;
- struct sysfs_dirent * s_parent;
- struct sysfs_dirent * s_sibling;
- struct sysfs_dirent * s_children;
- const char * s_name;
+ struct sysfs_dirent *s_parent;
+ struct sysfs_dirent *s_sibling;
+ const char *s_name;
union {
- struct sysfs_elem_dir dir;
- struct sysfs_elem_symlink symlink;
- struct sysfs_elem_attr attr;
- struct sysfs_elem_bin_attr bin_attr;
- } s_elem;
+ struct sysfs_elem_dir s_dir;
+ struct sysfs_elem_symlink s_symlink;
+ struct sysfs_elem_attr s_attr;
+ struct sysfs_elem_bin_attr s_bin_attr;
+ };
unsigned int s_flags;
- umode_t s_mode;
ino_t s_ino;
- struct dentry * s_dentry;
- struct iattr * s_iattr;
- atomic_t s_event;
+ umode_t s_mode;
+ struct iattr *s_iattr;
};
-#define SD_DEACTIVATED_BIAS INT_MIN
+#define SD_DEACTIVATED_BIAS INT_MIN
+
+#define SYSFS_TYPE_MASK 0x00ff
+#define SYSFS_DIR 0x0001
+#define SYSFS_KOBJ_ATTR 0x0002
+#define SYSFS_KOBJ_BIN_ATTR 0x0004
+#define SYSFS_KOBJ_LINK 0x0008
+#define SYSFS_COPY_NAME (SYSFS_DIR | SYSFS_KOBJ_LINK)
+
+#define SYSFS_FLAG_MASK ~SYSFS_TYPE_MASK
+#define SYSFS_FLAG_REMOVED 0x0200
+
+static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
+{
+ return sd->s_flags & SYSFS_TYPE_MASK;
+}
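+
+/* The s_flags word now carries the node type in its low byte
+ * (SYSFS_TYPE_MASK) and life-cycle flags such as SYSFS_FLAG_REMOVED above
+ * it; sysfs_type() just masks the type bits out.  A tiny self-contained
+ * illustration of the encoding (user-space, assert-based, for this sketch
+ * only):
+ *
+ *	#include <assert.h>
+ *
+ *	#define SYSFS_TYPE_MASK		0x00ff
+ *	#define SYSFS_DIR		0x0001
+ *	#define SYSFS_FLAG_REMOVED	0x0200
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int s_flags = SYSFS_DIR;
+ *
+ *		// marking the node removed must not disturb its type
+ *		s_flags |= SYSFS_FLAG_REMOVED;
+ *
+ *		assert((s_flags & SYSFS_TYPE_MASK) == SYSFS_DIR);
+ *		assert(s_flags & SYSFS_FLAG_REMOVED);
+ *		return 0;
+ *	}
+ */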
+/*
+ * Context structure to be used while adding/removing nodes.
+ */
struct sysfs_addrm_cxt {
struct sysfs_dirent *parent_sd;
struct inode *parent_inode;
@@ -51,63 +85,47 @@ struct sysfs_addrm_cxt {
int cnt;
};
-extern struct vfsmount * sysfs_mount;
+/*
+ * mount.c
+ */
extern struct sysfs_dirent sysfs_root;
+extern struct super_block *sysfs_sb;
extern struct kmem_cache *sysfs_dir_cachep;
-extern struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd);
-extern void sysfs_link_sibling(struct sysfs_dirent *sd);
-extern void sysfs_unlink_sibling(struct sysfs_dirent *sd);
-extern struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
-extern void sysfs_put_active(struct sysfs_dirent *sd);
-extern struct sysfs_dirent *sysfs_get_active_two(struct sysfs_dirent *sd);
-extern void sysfs_put_active_two(struct sysfs_dirent *sd);
-extern void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *parent_sd);
-extern void sysfs_add_one(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd);
-extern void sysfs_remove_one(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd);
-extern int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
-
-extern void sysfs_delete_inode(struct inode *inode);
-extern struct inode * sysfs_get_inode(struct sysfs_dirent *sd);
-extern void sysfs_instantiate(struct dentry *dentry, struct inode *inode);
-
-extern void release_sysfs_dirent(struct sysfs_dirent * sd);
-extern struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name);
-extern struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name);
-extern struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode,
- int type);
-
-extern int sysfs_add_file(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type);
-extern int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
-extern struct sysfs_dirent *sysfs_find(struct sysfs_dirent *dir, const char * name);
-
-extern int sysfs_create_subdir(struct kobject *kobj, const char *name,
- struct sysfs_dirent **p_sd);
-extern void sysfs_remove_subdir(struct sysfs_dirent *sd);
-
-extern int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
-
-extern spinlock_t sysfs_assoc_lock;
+/*
+ * dir.c
+ */
extern struct mutex sysfs_mutex;
-extern struct super_block * sysfs_sb;
+extern struct mutex sysfs_rename_mutex;
+extern spinlock_t sysfs_assoc_lock;
+
extern const struct file_operations sysfs_dir_operations;
-extern const struct file_operations sysfs_file_operations;
-extern const struct file_operations bin_fops;
extern const struct inode_operations sysfs_dir_inode_operations;
-extern const struct inode_operations sysfs_symlink_inode_operations;
-
-static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
-{
- return sd->s_flags & SYSFS_TYPE_MASK;
-}
-static inline struct sysfs_dirent * sysfs_get(struct sysfs_dirent * sd)
+struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
+void sysfs_put_active(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_active_two(struct sysfs_dirent *sd);
+void sysfs_put_active_two(struct sysfs_dirent *sd);
+void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
+ struct sysfs_dirent *parent_sd);
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
+void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
+void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
+
+struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
+
+void release_sysfs_dirent(struct sysfs_dirent *sd);
+
+int sysfs_create_subdir(struct kobject *kobj, const char *name,
+ struct sysfs_dirent **p_sd);
+void sysfs_remove_subdir(struct sysfs_dirent *sd);
+
+static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
{
if (sd) {
WARN_ON(!atomic_read(&sd->s_count));
@@ -116,13 +134,33 @@ static inline struct sysfs_dirent * sysfs_get(struct sysfs_dirent * sd)
return sd;
}
-static inline void sysfs_put(struct sysfs_dirent * sd)
+static inline void sysfs_put(struct sysfs_dirent *sd)
{
if (sd && atomic_dec_and_test(&sd->s_count))
release_sysfs_dirent(sd);
}
-static inline int sysfs_is_shadowed_inode(struct inode *inode)
-{
- return S_ISDIR(inode->i_mode) && inode->i_op->follow_link;
-}
+/*
+ * inode.c
+ */
+struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
+int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
+
+/*
+ * file.c
+ */
+extern const struct file_operations sysfs_file_operations;
+
+int sysfs_add_file(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type);
+
+/*
+ * bin.c
+ */
+extern const struct file_operations bin_fops;
+
+/*
+ * symlink.c
+ */
+extern const struct inode_operations sysfs_symlink_inode_operations;