author    David Woodhouse <dwmw2@infradead.org>  2007-10-13 14:43:54 +0100
committer David Woodhouse <dwmw2@infradead.org>  2007-10-13 14:43:54 +0100
commit    b160292cc216a50fd0cd386b0bda2cd48352c73b (patch)
tree      ef07cf98f91353ee4c9ec1e1ca7a2a5d9d4b538a /fs/xfs
parent    b37bde147890c8fea8369a5a4e230dabdea4ebfb (diff)
parent    bbf25010f1a6b761914430f5fca081ec8c7accd1 (diff)
Merge Linux 2.6.23
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/kmem.h        |  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c    |  9
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c |  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c   |  4
-rw-r--r--  fs/xfs/quota/xfs_qm.c          |  3
-rw-r--r--  fs/xfs/support/debug.h         | 10
-rw-r--r--  fs/xfs/xfs_da_btree.c          |  1
-rw-r--r--  fs/xfs/xfs_filestream.c        | 10
-rw-r--r--  fs/xfs/xfs_log.c               | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c       | 12
-rw-r--r--  fs/xfs/xfs_mru_cache.c         | 72
-rw-r--r--  fs/xfs/xfs_mru_cache.h         |  6
-rw-r--r--  fs/xfs/xfs_vnodeops.c          | 20
13 files changed, 77 insertions, 86 deletions
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index b4acc7f3c37..e6ea293f303 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
- return (gfp_mask & __GFP_WAIT);
+ return (gfp_mask & __GFP_WAIT) != 0;
}
#endif /* __XFS_SUPPORT_KMEM_H__ */
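
The kmem.h hunk above normalizes the masked flag to a strict 0/1 result instead of leaking the flag's bit value to callers. A minimal userspace sketch of the difference, with FLAG_WAIT standing in for __GFP_WAIT (value assumed):

    #include <stdio.h>

    #define FLAG_WAIT 0x10u                   /* stand-in for __GFP_WAIT */

    static int can_wait_raw(unsigned int mask)
    {
        return mask & FLAG_WAIT;              /* yields 0 or 0x10 */
    }

    static int can_wait_bool(unsigned int mask)
    {
        return (mask & FLAG_WAIT) != 0;       /* yields 0 or 1 */
    }

    int main(void)
    {
        printf("raw: %d, bool: %d\n",
               can_wait_raw(FLAG_WAIT), can_wait_bool(FLAG_WAIT));
        /* A caller testing `== 1` would misfire on the raw form. */
        return 0;
    }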
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fd4105d662e..5f152f60d74 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -181,6 +181,7 @@ xfs_setfilesize(
ip->i_d.di_size = isize;
ip->i_update_core = 1;
ip->i_update_size = 1;
+ mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -652,7 +653,7 @@ xfs_probe_cluster(
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
- size_t pg_offset, len = 0;
+ size_t pg_offset, pg_len = 0;
if (tindex == tlast) {
pg_offset =
@@ -665,16 +666,16 @@ xfs_probe_cluster(
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && !TestSetPageLocked(page)) {
- len = xfs_probe_page(page, pg_offset, mapped);
+ pg_len = xfs_probe_page(page, pg_offset, mapped);
unlock_page(page);
}
- if (!len) {
+ if (!pg_len) {
done = 1;
break;
}
- total += len;
+ total += pg_len;
tindex++;
}
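
Two things happen in the xfs_aops.c hunks: xfs_setfilesize() now marks the Linux inode dirty so the VFS writes back the updated size, and the probe loop's length variable is renamed so the per-page value no longer reuses the name len. A small userspace sketch of the loop shape, with illustrative names:

    #include <stdio.h>
    #include <stddef.h>

    /* Sum per-page probe lengths, stopping at the first empty page. */
    static size_t probe_total(const size_t *pages, size_t npages)
    {
        size_t total = 0;

        for (size_t i = 0; i < npages; i++) {
            size_t pg_len = pages[i];   /* distinct name, as in the patch */

            if (!pg_len)
                break;
            total += pg_len;
        }
        return total;
    }

    int main(void)
    {
        size_t pages[] = { 4096, 4096, 0, 4096 };

        printf("probed %zu bytes\n", probe_total(pages, 4));
        return 0;
    }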
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index bb72c3d4141..81565dea9af 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -46,7 +46,7 @@ xfs_param_t xfs_params = {
.inherit_nosym = { 0, 0, 1 },
.rotorstep = { 1, 1, 255 },
.inherit_nodfrg = { 0, 1, 1 },
- .fstrm_timer = { 1, 50, 3600*100},
+ .fstrm_timer = { 1, 30*100, 3600*100},
};
/*
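
The fstrm_timer triple above is {min, default, max} in centiseconds (it backs the filestream_centisecs sysctl), so the old default of 50 meant only 0.5 seconds; the new default of 30*100 = 3000 centiseconds gives the intended 30 seconds, against a maximum of 3600*100 centiseconds, i.e. one hour.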
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4528f9a3f30..491d1f4f202 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -415,8 +415,10 @@ xfs_fs_write_inode(
if (vp) {
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- if (sync)
+ if (sync) {
+ filemap_fdatawait(inode->i_mapping);
flags |= FLUSH_SYNC;
+ }
error = bhv_vop_iflush(vp, flags);
if (error == EAGAIN)
error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
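
The xfs_super.c hunk makes a synchronous write_inode wait for in-flight data I/O before the FLUSH_SYNC inode flush, so the flushed inode size covers the data just written. A loose userspace analogue of that ordering (demo.dat is an illustrative file name):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("demo.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
            perror("open");
            return EXIT_FAILURE;
        }
        if (write(fd, "payload", 7) != 7)
            perror("write");

        fdatasync(fd);   /* data first (the filemap_fdatawait step)    */
        fsync(fd);       /* then metadata (the FLUSH_SYNC inode flush) */

        close(fd);
        return 0;
    }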
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d274b23ade..6ff0f4de163 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -120,7 +120,8 @@ xfs_Gqm_init(void)
* Initialize the dquot hash tables.
*/
udqhash = kmem_zalloc_greedy(&hsize,
- XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+ XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
+ XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
KM_SLEEP | KM_MAYFAIL | KM_LARGE);
gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
hsize /= sizeof(xfs_dqhash_t);
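
The xfs_qm.c fix matters because kmem_zalloc_greedy() takes byte sizes, not element counts — note the hsize /= sizeof(xfs_dqhash_t) after the call. A userspace sketch of the greedy pattern, assuming a hypothetical zalloc_greedy() helper:

    #include <stdio.h>
    #include <stdlib.h>

    struct bucket { void *head; };           /* stand-in for xfs_dqhash_t */

    /* Try the largest byte size first, halving toward the minimum. */
    static void *zalloc_greedy(size_t *acquired, size_t low, size_t high)
    {
        for (size_t sz = high; sz >= low; sz /= 2) {
            void *p = calloc(1, sz);

            if (p) {
                *acquired = sz;
                return p;
            }
        }
        *acquired = 0;
        return NULL;
    }

    int main(void)
    {
        size_t hsize, lo = 64, hi = 4096;    /* entry counts, not bytes */
        struct bucket *hash;

        /* Convert counts to bytes at the call site, as the fix does. */
        hash = zalloc_greedy(&hsize, lo * sizeof(*hash), hi * sizeof(*hash));
        if (!hash)
            return EXIT_FAILURE;
        hsize /= sizeof(*hash);              /* back to an entry count */
        printf("hash table has %zu buckets\n", hsize);
        free(hash);
        return 0;
    }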
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index a27a7c8c052..855da040864 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -34,10 +34,10 @@ extern void cmn_err(int, char *, ...)
extern void assfail(char *expr, char *f, int l);
#define ASSERT_ALWAYS(expr) \
- (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef DEBUG
-# define ASSERT(expr) ((void)0)
+#define ASSERT(expr) ((void)0)
#ifndef STATIC
# define STATIC static noinline
@@ -49,8 +49,10 @@ extern void assfail(char *expr, char *f, int l);
#else /* DEBUG */
-# define ASSERT(expr) ASSERT_ALWAYS(expr)
-# include <linux/random.h>
+#include <linux/random.h>
+
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC noinline
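
The debug.h hunks give the DEBUG-build ASSERT its own expansion rather than routing it through ASSERT_ALWAYS. The pattern, reduced to a runnable userspace sketch with assfail() as a stand-in for the kernel helper:

    #include <stdio.h>
    #include <stdlib.h>

    static void assfail(const char *expr, const char *file, int line)
    {
        fprintf(stderr, "assertion failed: %s, file: %s, line: %d\n",
                expr, file, line);
        abort();
    }

    #define ASSERT_ALWAYS(expr) \
        ((expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))

    #ifdef DEBUG
    # define ASSERT(expr) \
        ((expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
    #else
    # define ASSERT(expr) ((void)0)
    #endif

    int main(void)
    {
        ASSERT_ALWAYS(1 + 1 == 2);   /* checked in every build */
        ASSERT(1 + 1 == 2);          /* checked only with -DDEBUG */
        return 0;
    }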
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index aea37df4aa6..26d09e2e1a7 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1975,7 +1975,6 @@ xfs_da_do_buf(
error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- int i;
cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
(long long)bno);
cmn_err(CE_ALERT, "dir: inode %lld\n",
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce2278611bb..36d8f6aa11a 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
void
xfs_fstrm_free_func(
- xfs_ino_t ino,
- fstrm_item_t *item)
+ unsigned long ino,
+ void *data)
{
+ fstrm_item_t *item = (fstrm_item_t *)data;
xfs_inode_t *ip = item->ip;
int ref;
@@ -438,7 +439,7 @@ xfs_filestream_mount(
grp_count = 10;
err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
- (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
+ xfs_fstrm_free_func);
return err;
}
@@ -467,8 +468,7 @@ void
xfs_filestream_flush(
xfs_mount_t *mp)
{
- /* point in time flush, so keep the reaper running */
- xfs_mru_cache_flush(mp->m_filestream, 1);
+ xfs_mru_cache_flush(mp->m_filestream);
}
/*
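
The xfs_filestream.c hunks fix the callback's type: rather than casting xfs_fstrm_free_func to xfs_mru_cache_free_func_t (calling through a mismatched function-pointer type is undefined behavior), the function now takes (unsigned long, void *) and casts its data argument internally. A sketch of the idiom with illustrative names:

    #include <stdio.h>

    typedef void (*free_func_t)(unsigned long key, void *data);

    struct item { const char *name; };

    static void item_free(unsigned long key, void *data)
    {
        struct item *item = data;   /* the cast moves inside the callback */

        printf("freeing key %lu (%s)\n", key, item->name);
    }

    static void cache_evict(free_func_t ff, unsigned long key, void *data)
    {
        ff(key, data);              /* called through the exact typedef */
    }

    int main(void)
    {
        struct item it = { "stream" };

        cache_evict(item_free, 42, &it);
        return 0;
    }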
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9d4c4fbeb3e..9bfb69e1e88 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2185,13 +2185,13 @@ xlog_state_do_callback(
}
cb = iclog->ic_callback;
- while (cb != 0) {
+ while (cb) {
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_callback = NULL;
LOG_UNLOCK(log, s);
/* perform callbacks in the order given */
- for (; cb != 0; cb = cb_next) {
+ for (; cb; cb = cb_next) {
cb_next = cb->cb_next;
cb->cb_func(cb->cb_arg, aborted);
}
@@ -2202,7 +2202,7 @@ xlog_state_do_callback(
loopdidcallbacks++;
funcdidcallbacks++;
- ASSERT(iclog->ic_callback == 0);
+ ASSERT(iclog->ic_callback == NULL);
if (!(iclog->ic_state & XLOG_STATE_IOERROR))
iclog->ic_state = XLOG_STATE_DIRTY;
@@ -3242,10 +3242,10 @@ xlog_ticket_put(xlog_t *log,
#else
/* When we debug, it is easier if tickets are cycled */
ticket->t_next = NULL;
- if (log->l_tail != 0) {
+ if (log->l_tail) {
log->l_tail->t_next = ticket;
} else {
- ASSERT(log->l_freelist == 0);
+ ASSERT(log->l_freelist == NULL);
log->l_freelist = ticket;
}
log->l_tail = ticket;
@@ -3463,7 +3463,7 @@ xlog_verify_iclog(xlog_t *log,
s = LOG_LOCK(log);
icptr = log->l_iclog;
for (i=0; i < log->l_iclog_bufs; i++) {
- if (icptr == 0)
+ if (icptr == NULL)
xlog_panic("xlog_verify_iclog: invalid ptr");
icptr = icptr->ic_next;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fddbb091a86..8ae6e8e5f3d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1366,7 +1366,7 @@ xlog_recover_add_to_cont_trans(
int old_len;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1412,7 +1412,7 @@ xlog_recover_add_to_trans(
if (!len)
return 0;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
if (len == sizeof(xfs_trans_header_t))
xlog_recover_add_item(&trans->r_itemq);
@@ -1467,12 +1467,12 @@ xlog_recover_unlink_tid(
xlog_recover_t *tp;
int found = 0;
- ASSERT(trans != 0);
+ ASSERT(trans != NULL);
if (trans == *q) {
*q = (*q)->r_next;
} else {
tp = *q;
- while (tp != 0) {
+ while (tp) {
if (tp->r_next == trans) {
found = 1;
break;
@@ -1495,7 +1495,7 @@ xlog_recover_insert_item_backq(
xlog_recover_item_t **q,
xlog_recover_item_t *item)
{
- if (*q == 0) {
+ if (*q == NULL) {
item->ri_prev = item->ri_next = item;
*q = item;
} else {
@@ -1899,7 +1899,7 @@ xlog_recover_do_reg_buffer(
break;
nbits = xfs_contig_bits(data_map, map_size, bit);
ASSERT(nbits > 0);
- ASSERT(item->ri_buf[i].i_addr != 0);
+ ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
ASSERT(XFS_BUF_COUNT(bp) >=
((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3cbbd..e0b358c1c53 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
*/
if (!_xfs_mru_cache_migrate(mru, now)) {
mru->time_zero = now;
- if (!mru->next_reap)
- mru->next_reap = mru->grp_count * mru->grp_time;
+ if (!mru->queued) {
+ mru->queued = 1;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+ mru->grp_count * mru->grp_time);
+ }
} else {
grp = (now - mru->time_zero) / mru->grp_time;
grp = (mru->lru_grp + grp) % mru->grp_count;
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
struct work_struct *work)
{
xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
- unsigned long now;
+ unsigned long now, next;
ASSERT(mru && mru->lists);
if (!mru || !mru->lists)
return;
mutex_spinlock(&mru->lock);
- now = jiffies;
- if (mru->reap_all ||
- (mru->next_reap && time_after(now, mru->next_reap))) {
- if (mru->reap_all)
- now += mru->grp_count * mru->grp_time * 2;
- mru->next_reap = _xfs_mru_cache_migrate(mru, now);
- _xfs_mru_cache_clear_reap_list(mru);
+ next = _xfs_mru_cache_migrate(mru, jiffies);
+ _xfs_mru_cache_clear_reap_list(mru);
+
+ mru->queued = next;
+ if ((mru->queued > 0)) {
+ now = jiffies;
+ if (next <= now)
+ next = 0;
+ else
+ next -= now;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
}
- /*
- * the process that triggered the reap_all is responsible
- * for restating the periodic reap if it is required.
- */
- if (!mru->reap_all)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
- mru->reap_all = 0;
mutex_spinunlock(&mru->lock, 0);
}
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
/* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1;
- mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+ mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
if (!mru->lists) {
err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
mru->grp_time = grp_time;
mru->free_func = free_func;
- /* start up the reaper event */
- mru->next_reap = 0;
- mru->reap_all = 0;
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
*mrup = mru;
exit:
@@ -394,35 +389,25 @@ exit:
* Call xfs_mru_cache_flush() to flush out all cached entries, calling their
* free functions as they're deleted. When this function returns, the caller is
* guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
*/
void
xfs_mru_cache_flush(
- xfs_mru_cache_t *mru,
- int restart)
+ xfs_mru_cache_t *mru)
{
if (!mru || !mru->lists)
return;
- cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
mutex_spinlock(&mru->lock);
- mru->reap_all = 1;
- mutex_spinunlock(&mru->lock, 0);
+ if (mru->queued) {
+ mutex_spinunlock(&mru->lock, 0);
+ cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+ mutex_spinlock(&mru->lock);
+ }
- queue_work(xfs_mru_reap_wq, &mru->work.work);
- flush_workqueue(xfs_mru_reap_wq);
+ _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+ _xfs_mru_cache_clear_reap_list(mru);
- mutex_spinlock(&mru->lock);
- WARN_ON_ONCE(mru->reap_all != 0);
- mru->reap_all = 0;
- if (restart)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
mutex_spinunlock(&mru->lock, 0);
}
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
if (!mru || !mru->lists)
return;
- /* we don't want the reaper to restart here */
- xfs_mru_cache_flush(mru, 0);
+ xfs_mru_cache_flush(mru);
kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
kmem_free(mru, sizeof(*mru));
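
Taken together, the xfs_mru_cache.c hunks replace the always-running reaper (and its next_reap/reap_all bookkeeping) with self-rearming delayed work gated by a queued flag: work is queued only when the cache holds entries, each pass re-arms itself for the next expiry, and flush cancels the outstanding work before reaping synchronously. A userspace model of the re-arm decision, with schedule_after() standing in for queue_delayed_work():

    #include <stdio.h>

    struct mru {
        int queued;           /* is a reap pass outstanding? */
        unsigned long next;   /* absolute time of next expiry, 0 if none */
    };

    static void schedule_after(unsigned long delay)
    {
        printf("reap re-armed for %lu ticks from now\n", delay);
    }

    /* One reap pass: migrate/clear would run here and report `next`. */
    static void reap(struct mru *mru, unsigned long now)
    {
        mru->queued = (mru->next != 0);
        if (mru->queued)
            schedule_after(mru->next > now ? mru->next - now : 0);
        else
            printf("cache empty: reaper goes idle\n");
    }

    int main(void)
    {
        struct mru mru = { .queued = 1, .next = 150 };

        reap(&mru, 100);   /* entries remain: re-arm in 50 ticks */
        mru.next = 0;
        reap(&mru, 160);   /* nothing left: no work queued */
        return 0;
    }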
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10ee8e..dd58ea1bbeb 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
unsigned int grp_time; /* Time period spanned by grps. */
unsigned int lru_grp; /* Group containing time zero. */
unsigned long time_zero; /* Time first element was added. */
- unsigned long next_reap; /* Time that the reaper should
- next do something. */
- unsigned int reap_all; /* if set, reap all lists */
xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
struct delayed_work work; /* Workqueue data for reaping. */
+ unsigned int queued; /* work has been queued */
} xfs_mru_cache_t;
int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
unsigned int grp_count,
xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
+void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void *value);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1a5ad8cd97b..60345922990 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1082,6 +1082,9 @@ xfs_fsync(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
+ if (flag & FSYNC_DATA)
+ filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+
/*
* We always need to make sure that the required inode state
* is safe on disk. The vnode might be clean but because
@@ -3769,12 +3772,16 @@ xfs_inode_flush(
sync_lsn = log->l_last_sync_lsn;
GRANT_UNLOCK(log, s);
- if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
- return 0;
+ if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
+ if (flags & FLUSH_SYNC)
+ log_flags |= XFS_LOG_SYNC;
+ error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+ if (error)
+ return error;
+ }
- if (flags & FLUSH_SYNC)
- log_flags |= XFS_LOG_SYNC;
- return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+ if (ip->i_update_core == 0)
+ return 0;
}
}
@@ -3788,9 +3795,6 @@ xfs_inode_flush(
if (flags & FLUSH_INODE) {
int flush_flags;
- if (xfs_ipincount(ip))
- return EAGAIN;
-
if (flags & FLUSH_SYNC) {
xfs_ilock(ip, XFS_ILOCK_SHARED);
xfs_iflock(ip);
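
The xfs_vnodeops.c hunks reorder xfs_inode_flush(): instead of returning early, it forces the log up to the inode's last LSN (synchronously when FLUSH_SYNC is set) and then skips the inode write-back when i_update_core is clear; the pincount EAGAIN bail-out goes away because the log force unpins the inode. A control-flow sketch with stand-in types, not the kernel API:

    #include <stdio.h>

    struct inode_state {
        long long last_lsn;   /* LSN of the inode's last logged change */
        int update_core;      /* dirty in-core fields remaining? */
    };

    static int log_force(long long lsn, int sync)
    {
        printf("log force to %lld (%s)\n", lsn, sync ? "sync" : "async");
        return 0;
    }

    static int inode_flush(struct inode_state *ip, long long sync_lsn, int sync)
    {
        int error;

        if (ip->last_lsn > sync_lsn) {   /* changes not yet on stable log */
            error = log_force(ip->last_lsn, sync);
            if (error)
                return error;
        }
        if (!ip->update_core)            /* nothing left to write back */
            return 0;

        printf("flushing inode core\n");
        return 0;
    }

    int main(void)
    {
        struct inode_state ip = { .last_lsn = 10, .update_core = 1 };

        return inode_flush(&ip, 5, 1);
    }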