author     H. Peter Anvin <hpa@linux.intel.com>   2012-01-19 12:56:50 -0800
committer  H. Peter Anvin <hpa@linux.intel.com>   2012-01-19 12:56:50 -0800
commit     282f445a779ed76fca9884fe377bf56a3088b208 (patch)
tree       d9abcf526baee0100672851e0a8894c19e762a39 /fs/xfs/xfs_log_cil.c
parent     68f30fbee19cc67849b9fa8e153ede70758afe81 (diff)
parent     90a4c0f51e8e44111a926be6f4c87af3938a79c3 (diff)
Merge remote-tracking branch 'linus/master' into x86/urgent
Diffstat (limited to 'fs/xfs/xfs_log_cil.c')
-rw-r--r--  fs/xfs/xfs_log_cil.c  98
1 file changed, 61 insertions, 37 deletions
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index c7755d5a5fb..d4fadbe8ac9 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -32,10 +32,7 @@
 #include "xfs_discard.h"
 
 /*
- * Perform initial CIL structure initialisation. If the CIL is not
- * enabled in this filesystem, ensure the log->l_cilp is null so
- * we can check this conditional to determine if we are doing delayed
- * logging or not.
+ * Perform initial CIL structure initialisation.
  */
 int
 xlog_cil_init(
@@ -44,10 +41,6 @@ xlog_cil_init(
         struct xfs_cil          *cil;
         struct xfs_cil_ctx *ctx;
 
-        log->l_cilp = NULL;
-        if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
-                return 0;
-
         cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
         if (!cil)
                 return ENOMEM;
@@ -80,9 +73,6 @@ void
 xlog_cil_destroy(
         struct log      *log)
 {
-        if (!log->l_cilp)
-                return;
-
         if (log->l_cilp->xc_ctx) {
                 if (log->l_cilp->xc_ctx->ticket)
                         xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
@@ -137,9 +127,6 @@ void
 xlog_cil_init_post_recovery(
         struct log      *log)
 {
-        if (!log->l_cilp)
-                return;
-
         log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
         log->l_cilp->xc_ctx->sequence = 1;
         log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
@@ -172,37 +159,73 @@ xlog_cil_init_post_recovery(
  * format the regions into the iclog as though they are being formatted
  * directly out of the objects themselves.
  */
-static void
-xlog_cil_format_items(
-        struct log              *log,
-        struct xfs_log_vec      *log_vector)
+static struct xfs_log_vec *
+xlog_cil_prepare_log_vecs(
+        struct xfs_trans        *tp)
 {
-        struct xfs_log_vec *lv;
+        struct xfs_log_item_desc *lidp;
+        struct xfs_log_vec      *lv = NULL;
+        struct xfs_log_vec      *ret_lv = NULL;
 
-        ASSERT(log_vector);
-        for (lv = log_vector; lv; lv = lv->lv_next) {
+
+        /* Bail out if we didn't find a log item.  */
+        if (list_empty(&tp->t_items)) {
+                ASSERT(0);
+                return NULL;
+        }
+
+        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+                struct xfs_log_vec *new_lv;
                 void    *ptr;
                 int     index;
                 int     len = 0;
+                uint    niovecs;
+
+                /* Skip items which aren't dirty in this transaction. */
+                if (!(lidp->lid_flags & XFS_LID_DIRTY))
+                        continue;
+
+                /* Skip items that do not have any vectors for writing */
+                niovecs = IOP_SIZE(lidp->lid_item);
+                if (!niovecs)
+                        continue;
+
+                new_lv = kmem_zalloc(sizeof(*new_lv) +
+                                niovecs * sizeof(struct xfs_log_iovec),
+                                KM_SLEEP);
+
+                /* The allocated iovec region lies beyond the log vector. */
+                new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
+                new_lv->lv_niovecs = niovecs;
+                new_lv->lv_item = lidp->lid_item;
 
                 /* build the vector array and calculate it's length */
-                IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
-                for (index = 0; index < lv->lv_niovecs; index++)
-                        len += lv->lv_iovecp[index].i_len;
+                IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp);
+                for (index = 0; index < new_lv->lv_niovecs; index++)
+                        len += new_lv->lv_iovecp[index].i_len;
 
-                lv->lv_buf_len = len;
-                lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
-                ptr = lv->lv_buf;
+                new_lv->lv_buf_len = len;
+                new_lv->lv_buf = kmem_alloc(new_lv->lv_buf_len,
+                                KM_SLEEP|KM_NOFS);
+                ptr = new_lv->lv_buf;
 
-                for (index = 0; index < lv->lv_niovecs; index++) {
-                        struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
+                for (index = 0; index < new_lv->lv_niovecs; index++) {
+                        struct xfs_log_iovec *vec = &new_lv->lv_iovecp[index];
 
                         memcpy(ptr, vec->i_addr, vec->i_len);
                         vec->i_addr = ptr;
                         ptr += vec->i_len;
                 }
-                ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
+                ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len);
+
+                if (!ret_lv)
+                        ret_lv = new_lv;
+                else
+                        lv->lv_next = new_lv;
+                lv = new_lv;
         }
+
+        return ret_lv;
 }
 
 /*
@@ -256,7 +279,7 @@ xfs_cil_prepare_item(
  * Insert the log items into the CIL and calculate the difference in space
  * consumed by the item. Add the space to the checkpoint ticket and calculate
  * if the change requires additional log metadata. If it does, take that space
- * as well. Remove the amount of space we addded to the checkpoint ticket from
+ * as well. Remove the amount of space we added to the checkpoint ticket from
  * the current transaction ticket so that the accounting works out correctly.
  */
 static void
@@ -635,28 +658,30 @@ out_abort:
  * background commit, returns without it held once background commits are
  * allowed again.
  */
-void
+int
 xfs_log_commit_cil(
         struct xfs_mount        *mp,
         struct xfs_trans        *tp,
-        struct xfs_log_vec      *log_vector,
         xfs_lsn_t               *commit_lsn,
         int                     flags)
 {
         struct log              *log = mp->m_log;
         int                     log_flags = 0;
         int                     push = 0;
+        struct xfs_log_vec      *log_vector;
 
         if (flags & XFS_TRANS_RELEASE_LOG_RES)
                 log_flags = XFS_LOG_REL_PERM_RESERV;
 
         /*
-         * do all the hard work of formatting items (including memory
+         * Do all the hard work of formatting items (including memory
          * allocation) outside the CIL context lock. This prevents stalling CIL
          * pushes when we are low on memory and a transaction commit spends a
          * lot of time in memory reclaim.
         */
-        xlog_cil_format_items(log, log_vector);
+        log_vector = xlog_cil_prepare_log_vecs(tp);
+        if (!log_vector)
+                return ENOMEM;
 
         /* lock out background commit */
         down_read(&log->l_cilp->xc_ctx_lock);
@@ -709,6 +734,7 @@ xfs_log_commit_cil(
          */
         if (push)
                 xlog_cil_push(log, 0);
+        return 0;
 }
 
 /*
@@ -786,8 +812,6 @@ xfs_log_item_in_current_chkpt(
 {
         struct xfs_cil_ctx      *ctx;
 
-        if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
-                return false;
         if (list_empty(&lip->li_cil))
                 return false;
 
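For reference, the single-allocation layout that the new xlog_cil_prepare_log_vecs() relies on ("the allocated iovec region lies beyond the log vector") can be illustrated outside the kernel. The sketch below is a hypothetical userspace approximation using stand-in types (log_vec_stub, iovec_stub) and calloc() in place of kmem_zalloc(); it is not XFS code, only a minimal demonstration of carving the iovec array out of the same allocation as the vector header.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct xfs_log_iovec. */
struct iovec_stub {
        void    *i_addr;
        int     i_len;
};

/* Stand-in for struct xfs_log_vec. */
struct log_vec_stub {
        struct log_vec_stub     *lv_next;
        struct iovec_stub       *lv_iovecp;     /* points just past this struct */
        int                     lv_niovecs;
};

/*
 * One allocation covers the header and the trailing iovec array, mirroring
 * the kmem_zalloc(sizeof(*new_lv) + niovecs * sizeof(...)) call in the diff.
 */
static struct log_vec_stub *alloc_log_vec(int niovecs)
{
        struct log_vec_stub *lv;

        lv = calloc(1, sizeof(*lv) + niovecs * sizeof(struct iovec_stub));
        if (!lv)
                return NULL;    /* the commit's caller turns this into ENOMEM */

        /* The allocated iovec region lies beyond the log vector. */
        lv->lv_iovecp = (struct iovec_stub *)&lv[1];
        lv->lv_niovecs = niovecs;
        return lv;
}

int main(void)
{
        struct log_vec_stub *lv = alloc_log_vec(3);

        if (!lv)
                return 1;

        lv->lv_iovecp[0].i_len = 16;    /* the embedded array is usable at once */
        printf("header at %p, iovec array starts at %p\n",
               (void *)lv, (void *)lv->lv_iovecp);

        free(lv);       /* a single free releases header and array together */
        return 0;
}

The point of this layout in the commit is that each log vector and its iovecs are built with one allocation outside the CIL context lock, so an allocation failure can be reported as ENOMEM from xfs_log_commit_cil() before the lock is ever taken.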