Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/linux-2.6/xfs_aops.c    |  38
-rw-r--r-- | fs/xfs/linux-2.6/xfs_aops.h    |   1
-rw-r--r-- | fs/xfs/linux-2.6/xfs_buf.c     |   9
-rw-r--r-- | fs/xfs/linux-2.6/xfs_fs_subr.c |  14
-rw-r--r-- | fs/xfs/linux-2.6/xfs_ioctl.c   |  23
-rw-r--r-- | fs/xfs/linux-2.6/xfs_ioctl32.c |  12
-rw-r--r-- | fs/xfs/linux-2.6/xfs_lrw.c     |  18
-rw-r--r-- | fs/xfs/linux-2.6/xfs_sync.c    |  78
-rw-r--r-- | fs/xfs/linux-2.6/xfs_sync.h    |   9
-rw-r--r-- | fs/xfs/xfs_bmap.c              | 192
-rw-r--r-- | fs/xfs/xfs_iget.c              |  23
-rw-r--r-- | fs/xfs/xfs_inode.c             |   2
-rw-r--r-- | fs/xfs/xfs_iomap.c             |  61
-rw-r--r-- | fs/xfs/xfs_iomap.h             |   3
-rw-r--r-- | fs/xfs/xfs_log.c               |  78
-rw-r--r-- | fs/xfs/xfs_mount.c             |   3
-rw-r--r-- | fs/xfs/xfs_mount.h             |   2
-rw-r--r-- | fs/xfs/xfs_vnodeops.c          |   7
18 files changed, 299 insertions, 274 deletions
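
Note: the two ioctl files in this diffstat (xfs_ioctl.c and xfs_ioctl32.c) switch from open-coded kmalloc()/copy_from_user() pairs to memdup_user(), as shown in the hunks below. A minimal sketch of that pattern follows; the handler name and buffer use are illustrative (not taken from the patch), and it uses the usual negative-errno convention rather than the positive errnos the XFS ioctl paths traditionally return.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Illustrative handler (not from the patch): copy a user buffer with
 * memdup_user() instead of kmalloc() + copy_from_user().  memdup_user()
 * returns either the kernel copy or an ERR_PTR()-encoded error.
 */
static int example_copy_in(const void __user *ubuf, size_t len)
{
	void *kbuf;
	int error = 0;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* ... use kbuf, e.g. hand it to an attribute-set helper ... */

	kfree(kbuf);
	return error;
}
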
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c13f67300fe..7ec89fc05b2 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -153,23 +153,6 @@ xfs_find_bdev_for_inode( } /* - * Schedule IO completion handling on a xfsdatad if this was - * the final hold on this ioend. If we are asked to wait, - * flush the workqueue. - */ -STATIC void -xfs_finish_ioend( - xfs_ioend_t *ioend, - int wait) -{ - if (atomic_dec_and_test(&ioend->io_remaining)) { - queue_work(xfsdatad_workqueue, &ioend->io_work); - if (wait) - flush_workqueue(xfsdatad_workqueue); - } -} - -/* * We're now finished for good with this ioend structure. * Update the page state via the associated buffer_heads, * release holds on the inode and bio, and finally free @@ -310,6 +293,27 @@ xfs_end_bio_read( } /* + * Schedule IO completion handling on a xfsdatad if this was + * the final hold on this ioend. If we are asked to wait, + * flush the workqueue. + */ +STATIC void +xfs_finish_ioend( + xfs_ioend_t *ioend, + int wait) +{ + if (atomic_dec_and_test(&ioend->io_remaining)) { + struct workqueue_struct *wq = xfsdatad_workqueue; + if (ioend->io_work.func == xfs_end_bio_unwritten) + wq = xfsconvertd_workqueue; + + queue_work(wq, &ioend->io_work); + if (wait) + flush_workqueue(wq); + } +} + +/* * Allocate and initialise an IO completion structure. * We need to track unwritten extent write completion here initially. * We'll need to extend this for updating the ondisk inode size later diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 1dd52884975..221b3e66cee 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -19,6 +19,7 @@ #define __XFS_AOPS_H__ extern struct workqueue_struct *xfsdatad_workqueue; +extern struct workqueue_struct *xfsconvertd_workqueue; extern mempool_t *xfs_ioend_pool; /* diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index aa1016bb913..e28800a9f2b 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -51,6 +51,7 @@ static struct shrinker xfs_buf_shake = { static struct workqueue_struct *xfslogd_workqueue; struct workqueue_struct *xfsdatad_workqueue; +struct workqueue_struct *xfsconvertd_workqueue; #ifdef XFS_BUF_TRACE void @@ -1775,6 +1776,7 @@ xfs_flush_buftarg( xfs_buf_t *bp, *n; int pincount = 0; + xfs_buf_runall_queues(xfsconvertd_workqueue); xfs_buf_runall_queues(xfsdatad_workqueue); xfs_buf_runall_queues(xfslogd_workqueue); @@ -1831,9 +1833,15 @@ xfs_buf_init(void) if (!xfsdatad_workqueue) goto out_destroy_xfslogd_workqueue; + xfsconvertd_workqueue = create_workqueue("xfsconvertd"); + if (!xfsconvertd_workqueue) + goto out_destroy_xfsdatad_workqueue; + register_shrinker(&xfs_buf_shake); return 0; + out_destroy_xfsdatad_workqueue: + destroy_workqueue(xfsdatad_workqueue); out_destroy_xfslogd_workqueue: destroy_workqueue(xfslogd_workqueue); out_free_buf_zone: @@ -1849,6 +1857,7 @@ void xfs_buf_terminate(void) { unregister_shrinker(&xfs_buf_shake); + destroy_workqueue(xfsconvertd_workqueue); destroy_workqueue(xfsdatad_workqueue); destroy_workqueue(xfslogd_workqueue); kmem_zone_destroy(xfs_buf_zone); diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 5aeb7777696..08be36d7326 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -74,14 +74,14 @@ xfs_flush_pages( if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { xfs_iflags_clear(ip, XFS_ITRUNCATED); - ret = filemap_fdatawrite(mapping); - if (flags & XFS_B_ASYNC) - 
return -ret; - ret2 = filemap_fdatawait(mapping); - if (!ret) - ret = ret2; + ret = -filemap_fdatawrite(mapping); } - return -ret; + if (flags & XFS_B_ASYNC) + return ret; + ret2 = xfs_wait_on_pages(ip, first, last); + if (!ret) + ret = ret2; + return ret; } int diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index d0b499418a7..34eaab608e6 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -489,17 +489,12 @@ xfs_attrmulti_attr_set( if (len > XATTR_SIZE_MAX) return EINVAL; - kbuf = kmalloc(len, GFP_KERNEL); - if (!kbuf) - return ENOMEM; - - if (copy_from_user(kbuf, ubuf, len)) - goto out_kfree; + kbuf = memdup_user(ubuf, len); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); - out_kfree: - kfree(kbuf); return error; } @@ -540,20 +535,16 @@ xfs_attrmulti_by_handle( if (!size || size > 16 * PAGE_SIZE) goto out_dput; - error = ENOMEM; - ops = kmalloc(size, GFP_KERNEL); - if (!ops) + ops = memdup_user(am_hreq.ops, size); + if (IS_ERR(ops)) { + error = PTR_ERR(ops); goto out_dput; - - error = EFAULT; - if (copy_from_user(ops, am_hreq.ops, size)) - goto out_kfree_ops; + } attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); if (!attr_name) goto out_kfree_ops; - error = 0; for (i = 0; i < am_hreq.opcount; i++) { ops[i].am_error = strncpy_from_user(attr_name, diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index c70c4e3db79..0882d166239 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -427,20 +427,16 @@ xfs_compat_attrmulti_by_handle( if (!size || size > 16 * PAGE_SIZE) goto out_dput; - error = ENOMEM; - ops = kmalloc(size, GFP_KERNEL); - if (!ops) + ops = memdup_user(compat_ptr(am_hreq.ops), size); + if (IS_ERR(ops)) { + error = PTR_ERR(ops); goto out_dput; - - error = EFAULT; - if (copy_from_user(ops, compat_ptr(am_hreq.ops), size)) - goto out_kfree_ops; + } attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); if (!attr_name) goto out_kfree_ops; - error = 0; for (i = 0; i < am_hreq.opcount; i++) { ops[i].am_error = strncpy_from_user(attr_name, diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 7e90daa0d1d..9142192ccbe 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -751,10 +751,26 @@ start: goto relock; } } else { + int enospc = 0; + ssize_t ret2 = 0; + +write_retry: xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, *offset, ioflags); - ret = generic_file_buffered_write(iocb, iovp, segs, + ret2 = generic_file_buffered_write(iocb, iovp, segs, pos, offset, count, ret); + /* + * if we just got an ENOSPC, flush the inode now we + * aren't holding any page locks and retry *once* + */ + if (ret2 == -ENOSPC && !enospc) { + error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE); + if (error) + goto out_unlock_internal; + enospc = 1; + goto write_retry; + } + ret = ret2; } current->backing_dev_info = NULL; diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index a608e72fa40..f7ba76633c2 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -62,12 +62,6 @@ xfs_sync_inodes_ag( uint32_t first_index = 0; int error = 0; int last_error = 0; - int fflag = XFS_B_ASYNC; - - if (flags & SYNC_DELWRI) - fflag = XFS_B_DELWRI; - if (flags & SYNC_WAIT) - fflag = 0; /* synchronous overrides all */ do { struct inode *inode; @@ -128,11 +122,23 @@ xfs_sync_inodes_ag( * If we have to flush data or wait for I/O completion * we need to hold the iolock. 
*/ - if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) { - xfs_ilock(ip, XFS_IOLOCK_SHARED); - lock_flags |= XFS_IOLOCK_SHARED; - error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); - if (flags & SYNC_IOWAIT) + if (flags & SYNC_DELWRI) { + if (VN_DIRTY(inode)) { + if (flags & SYNC_TRYLOCK) { + if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) + lock_flags |= XFS_IOLOCK_SHARED; + } else { + xfs_ilock(ip, XFS_IOLOCK_SHARED); + lock_flags |= XFS_IOLOCK_SHARED; + } + if (lock_flags & XFS_IOLOCK_SHARED) { + error = xfs_flush_pages(ip, 0, -1, + (flags & SYNC_WAIT) ? 0 + : XFS_B_ASYNC, + FI_NONE); + } + } + if (VN_CACHED(inode) && (flags & SYNC_IOWAIT)) xfs_ioend_wait(ip); } xfs_ilock(ip, XFS_ILOCK_SHARED); @@ -398,15 +404,17 @@ STATIC void xfs_syncd_queue_work( struct xfs_mount *mp, void *data, - void (*syncer)(struct xfs_mount *, void *)) + void (*syncer)(struct xfs_mount *, void *), + struct completion *completion) { - struct bhv_vfs_sync_work *work; + struct xfs_sync_work *work; - work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP); + work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP); INIT_LIST_HEAD(&work->w_list); work->w_syncer = syncer; work->w_data = data; work->w_mount = mp; + work->w_completion = completion; spin_lock(&mp->m_sync_lock); list_add_tail(&work->w_list, &mp->m_sync_list); spin_unlock(&mp->m_sync_lock); @@ -420,49 +428,26 @@ xfs_syncd_queue_work( * heads, looking about for more room... */ STATIC void -xfs_flush_inode_work( - struct xfs_mount *mp, - void *arg) -{ - struct inode *inode = arg; - filemap_flush(inode->i_mapping); - iput(inode); -} - -void -xfs_flush_inode( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - igrab(inode); - xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); - delay(msecs_to_jiffies(500)); -} - -/* - * This is the "bigger hammer" version of xfs_flush_inode_work... - * (IOW, "If at first you don't succeed, use a Bigger Hammer"). 
- */ -STATIC void -xfs_flush_device_work( +xfs_flush_inodes_work( struct xfs_mount *mp, void *arg) { struct inode *inode = arg; - sync_blockdev(mp->m_super->s_bdev); + xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK); + xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT); iput(inode); } void -xfs_flush_device( +xfs_flush_inodes( xfs_inode_t *ip) { struct inode *inode = VFS_I(ip); + DECLARE_COMPLETION_ONSTACK(completion); igrab(inode); - xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); - delay(msecs_to_jiffies(500)); + xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion); + wait_for_completion(&completion); xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); } @@ -497,7 +482,7 @@ xfssyncd( { struct xfs_mount *mp = arg; long timeleft; - bhv_vfs_sync_work_t *work, *n; + xfs_sync_work_t *work, *n; LIST_HEAD (tmp); set_freezable(); @@ -532,6 +517,8 @@ xfssyncd( list_del(&work->w_list); if (work == &mp->m_sync_work) continue; + if (work->w_completion) + complete(work->w_completion); kmem_free(work); } } @@ -545,6 +532,7 @@ xfs_syncd_init( { mp->m_sync_work.w_syncer = xfs_sync_worker; mp->m_sync_work.w_mount = mp; + mp->m_sync_work.w_completion = NULL; mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd"); if (IS_ERR(mp->m_sync_task)) return -PTR_ERR(mp->m_sync_task); diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 04f058c848a..308d5bf6dfb 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -21,18 +21,20 @@ struct xfs_mount; struct xfs_perag; -typedef struct bhv_vfs_sync_work { +typedef struct xfs_sync_work { struct list_head w_list; struct xfs_mount *w_mount; void *w_data; /* syncer routine argument */ void (*w_syncer)(struct xfs_mount *, void *); -} bhv_vfs_sync_work_t; + struct completion *w_completion; +} xfs_sync_work_t; #define SYNC_ATTR 0x0001 /* sync attributes */ #define SYNC_DELWRI 0x0002 /* look at delayed writes */ #define SYNC_WAIT 0x0004 /* wait for i/o to complete */ #define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */ #define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */ +#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */ int xfs_syncd_init(struct xfs_mount *mp); void xfs_syncd_stop(struct xfs_mount *mp); @@ -43,8 +45,7 @@ int xfs_sync_fsdata(struct xfs_mount *mp, int flags); int xfs_quiesce_data(struct xfs_mount *mp); void xfs_quiesce_attr(struct xfs_mount *mp); -void xfs_flush_inode(struct xfs_inode *ip); -void xfs_flush_device(struct xfs_inode *ip); +void xfs_flush_inodes(struct xfs_inode *ip); int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode); diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3a6ed426327..ca7c6005a48 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -5880,7 +5880,7 @@ xfs_getbmap( void *arg) /* formatter arg */ { __int64_t bmvend; /* last block requested */ - int error; /* return value */ + int error = 0; /* return value */ __int64_t fixlen; /* length for -1 case */ int i; /* extent number */ int lock; /* lock state */ @@ -5890,39 +5890,18 @@ xfs_getbmap( int nexleft; /* # of user extents left */ int subnex; /* # of bmapi's can do */ int nmap; /* number of map entries */ - struct getbmapx out; /* output structure */ + struct getbmapx *out; /* output structure */ int whichfork; /* data or attr fork */ int prealloced; /* this is a file with * preallocated data space */ int iflags; /* interface flags */ int 
bmapi_flags; /* flags for xfs_bmapi */ + int cur_ext = 0; mp = ip->i_mount; iflags = bmv->bmv_iflags; - whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; - /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not - * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ - * bit is set for the file, generate a read event in order - * that the DMAPI application may do its thing before we return - * the extents. Usually this means restoring user file data to - * regions of the file that look like holes. - * - * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify - * BMV_IF_NO_DMAPI_READ so that read events are generated. - * If this were not true, callers of ioctl( XFS_IOC_GETBMAP ) - * could misinterpret holes in a DMAPI file as true holes, - * when in fact they may represent offline user data. - */ - if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 && - DM_EVENT_ENABLED(ip, DM_EVENT_READ) && - whichfork == XFS_DATA_FORK) { - error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); - if (error) - return XFS_ERROR(error); - } - if (whichfork == XFS_ATTR_FORK) { if (XFS_IFORK_Q(ip)) { if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && @@ -5936,11 +5915,37 @@ xfs_getbmap( ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } - } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && - ip->i_d.di_format != XFS_DINODE_FMT_BTREE && - ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) - return XFS_ERROR(EINVAL); - if (whichfork == XFS_DATA_FORK) { + + prealloced = 0; + fixlen = 1LL << 32; + } else { + /* + * If the BMV_IF_NO_DMAPI_READ interface bit specified, do + * not generate a DMAPI read event. Otherwise, if the + * DM_EVENT_READ bit is set for the file, generate a read + * event in order that the DMAPI application may do its thing + * before we return the extents. Usually this means restoring + * user file data to regions of the file that look like holes. + * + * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify + * BMV_IF_NO_DMAPI_READ so that read events are generated. + * If this were not true, callers of ioctl(XFS_IOC_GETBMAP) + * could misinterpret holes in a DMAPI file as true holes, + * when in fact they may represent offline user data. 
+ */ + if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && + !(iflags & BMV_IF_NO_DMAPI_READ)) { + error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, + 0, 0, 0, NULL); + if (error) + return XFS_ERROR(error); + } + + if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_format != XFS_DINODE_FMT_BTREE && + ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EINVAL); + if (xfs_get_extsz_hint(ip) || ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ prealloced = 1; @@ -5949,42 +5954,41 @@ xfs_getbmap( prealloced = 0; fixlen = ip->i_size; } - } else { - prealloced = 0; - fixlen = 1LL << 32; } if (bmv->bmv_length == -1) { fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); - bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset), - (__int64_t)0); - } else if (bmv->bmv_length < 0) - return XFS_ERROR(EINVAL); - if (bmv->bmv_length == 0) { + bmv->bmv_length = + max_t(__int64_t, fixlen - bmv->bmv_offset, 0); + } else if (bmv->bmv_length == 0) { bmv->bmv_entries = 0; return 0; + } else if (bmv->bmv_length < 0) { + return XFS_ERROR(EINVAL); } + nex = bmv->bmv_count - 1; if (nex <= 0) return XFS_ERROR(EINVAL); bmvend = bmv->bmv_offset + bmv->bmv_length; - xfs_ilock(ip, XFS_IOLOCK_SHARED); - if (((iflags & BMV_IF_DELALLOC) == 0) && - (whichfork == XFS_DATA_FORK) && - (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) { - /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ - error = xfs_flush_pages(ip, (xfs_off_t)0, - -1, 0, FI_REMAPF); - if (error) { - xfs_iunlock(ip, XFS_IOLOCK_SHARED); - return error; + if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) + return XFS_ERROR(ENOMEM); + out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL); + if (!out) + return XFS_ERROR(ENOMEM); + + xfs_ilock(ip, XFS_IOLOCK_SHARED); + if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { + if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) { + error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF); + if (error) + goto out_unlock_iolock; } - } - ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) || - ip->i_delayed_blks == 0); + ASSERT(ip->i_delayed_blks == 0); + } lock = xfs_ilock_map_shared(ip); @@ -5995,23 +5999,25 @@ xfs_getbmap( if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; - bmapi_flags = xfs_bmapi_aflag(whichfork) | - ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE); + bmapi_flags = xfs_bmapi_aflag(whichfork); + if (!(iflags & BMV_IF_PREALLOC)) + bmapi_flags |= XFS_BMAPI_IGSTATE; /* * Allocate enough space to handle "subnex" maps at a time. 
*/ + error = ENOMEM; subnex = 16; - map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP); + map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL); + if (!map) + goto out_unlock_ilock; bmv->bmv_entries = 0; - if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) { - if (((iflags & BMV_IF_DELALLOC) == 0) || - whichfork == XFS_ATTR_FORK) { - error = 0; - goto unlock_and_return; - } + if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 && + (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) { + error = 0; + goto out_free_map; } nexleft = nex; @@ -6023,53 +6029,61 @@ xfs_getbmap( bmapi_flags, NULL, 0, map, &nmap, NULL, NULL); if (error) - goto unlock_and_return; + goto out_free_map; ASSERT(nmap <= subnex); for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { - out.bmv_oflags = 0; + out[cur_ext].bmv_oflags = 0; if (map[i].br_state == XFS_EXT_UNWRITTEN) - out.bmv_oflags |= BMV_OF_PREALLOC; + out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; else if (map[i].br_startblock == DELAYSTARTBLOCK) - out.bmv_oflags |= BMV_OF_DELALLOC; - out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); - out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); - out.bmv_unused1 = out.bmv_unused2 = 0; + out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC; + out[cur_ext].bmv_offset = + XFS_FSB_TO_BB(mp, map[i].br_startoff); + out[cur_ext].bmv_length = + XFS_FSB_TO_BB(mp, map[i].br_blockcount); + out[cur_ext].bmv_unused1 = 0; + out[cur_ext].bmv_unused2 = 0; ASSERT(((iflags & BMV_IF_DELALLOC) != 0) || (map[i].br_startblock != DELAYSTARTBLOCK)); if (map[i].br_startblock == HOLESTARTBLOCK && whichfork == XFS_ATTR_FORK) { /* came to the end of attribute fork */ - out.bmv_oflags |= BMV_OF_LAST; - goto unlock_and_return; - } else { - int full = 0; /* user array is full */ - - if (!xfs_getbmapx_fix_eof_hole(ip, &out, - prealloced, bmvend, - map[i].br_startblock)) { - goto unlock_and_return; - } - - /* format results & advance arg */ - error = formatter(&arg, &out, &full); - if (error || full) - goto unlock_and_return; - nexleft--; - bmv->bmv_offset = - out.bmv_offset + out.bmv_length; - bmv->bmv_length = MAX((__int64_t)0, - (__int64_t)(bmvend - bmv->bmv_offset)); - bmv->bmv_entries++; + out[cur_ext].bmv_oflags |= BMV_OF_LAST; + goto out_free_map; } + + if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext], + prealloced, bmvend, + map[i].br_startblock)) + goto out_free_map; + + nexleft--; + bmv->bmv_offset = + out[cur_ext].bmv_offset + + out[cur_ext].bmv_length; + bmv->bmv_length = + max_t(__int64_t, 0, bmvend - bmv->bmv_offset); + bmv->bmv_entries++; + cur_ext++; } } while (nmap && nexleft && bmv->bmv_length); -unlock_and_return: + out_free_map: + kmem_free(map); + out_unlock_ilock: xfs_iunlock_map_shared(ip, lock); + out_unlock_iolock: xfs_iunlock(ip, XFS_IOLOCK_SHARED); - kmem_free(map); + for (i = 0; i < cur_ext; i++) { + int full = 0; /* user array is full */ + + /* format results & advance arg */ + error = formatter(&arg, &out[i], &full); + if (error || full) + break; + } return error; } diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 478e587087f..89b81eedce6 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -69,15 +69,6 @@ xfs_inode_alloc( ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); - /* - * initialise the VFS inode here to get failures - * out of the way early. 
- */ - if (!inode_init_always(mp->m_super, VFS_I(ip))) { - kmem_zone_free(xfs_inode_zone, ip); - return NULL; - } - /* initialise the xfs inode */ ip->i_ino = ino; ip->i_mount = mp; @@ -113,6 +104,20 @@ xfs_inode_alloc( #ifdef XFS_DIR2_TRACE ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); #endif + /* + * Now initialise the VFS inode. We do this after the xfs_inode + * initialisation as internal failures will result in ->destroy_inode + * being called and that will pass down through the reclaim path and + * free the XFS inode. This path requires the XFS inode to already be + * initialised. Hence if this call fails, the xfs_inode has already + * been freed and we should not reference it at all in the error + * handling. + */ + if (!inode_init_always(mp->m_super, VFS_I(ip))) + return NULL; + + /* prevent anyone from using this yet */ + VFS_I(ip)->i_state = I_NEW|I_LOCK; return ip; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index e7ae08d1df4..123b20c8cbf 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1258,8 +1258,10 @@ xfs_file_last_byte( * necessary. */ if (ip->i_df.if_flags & XFS_IFEXTENTS) { + xfs_ilock(ip, XFS_ILOCK_SHARED); error = xfs_bmap_last_offset(NULL, ip, &last_block, XFS_DATA_FORK); + xfs_iunlock(ip, XFS_ILOCK_SHARED); if (error) { last_block = 0; } diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 08ce72316bf..5aaa2d7ec15 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -338,38 +338,6 @@ xfs_iomap_eof_align_last_fsb( } STATIC int -xfs_flush_space( - xfs_inode_t *ip, - int *fsynced, - int *ioflags) -{ - switch (*fsynced) { - case 0: - if (ip->i_delayed_blks) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_flush_inode(ip); - xfs_ilock(ip, XFS_ILOCK_EXCL); - *fsynced = 1; - } else { - *ioflags |= BMAPI_SYNC; - *fsynced = 2; - } - return 0; - case 1: - *fsynced = 2; - *ioflags |= BMAPI_SYNC; - return 0; - case 2: - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_flush_device(ip); - xfs_ilock(ip, XFS_ILOCK_EXCL); - *fsynced = 3; - return 0; - } - return 1; -} - -STATIC int xfs_cmn_err_fsblock_zero( xfs_inode_t *ip, xfs_bmbt_irec_t *imap) @@ -538,15 +506,9 @@ error_out: } /* - * If the caller is doing a write at the end of the file, - * then extend the allocation out to the file system's write - * iosize. We clean up any extra space left over when the - * file is closed in xfs_inactive(). - * - * For sync writes, we are flushing delayed allocate space to - * try to make additional space available for allocation near - * the filesystem full boundary - preallocation hurts in that - * situation, of course. + * If the caller is doing a write at the end of the file, then extend the + * allocation out to the file system's write iosize. We clean up any extra + * space left over when the file is closed in xfs_inactive(). 
*/ STATIC int xfs_iomap_eof_want_preallocate( @@ -565,7 +527,7 @@ xfs_iomap_eof_want_preallocate( int n, error, imaps; *prealloc = 0; - if ((ioflag & BMAPI_SYNC) || (offset + count) <= ip->i_size) + if ((offset + count) <= ip->i_size) return 0; /* @@ -611,7 +573,7 @@ xfs_iomap_write_delay( xfs_extlen_t extsz; int nimaps; xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; - int prealloc, fsynced = 0; + int prealloc, flushed = 0; int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); @@ -627,12 +589,12 @@ xfs_iomap_write_delay( extsz = xfs_get_extsz_hint(ip); offset_fsb = XFS_B_TO_FSBT(mp, offset); -retry: error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, ioflag, imap, XFS_WRITE_IMAPS, &prealloc); if (error) return error; +retry: if (prealloc) { aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); ioalign = XFS_B_TO_FSBT(mp, aligned_offset); @@ -659,15 +621,22 @@ retry: /* * If bmapi returned us nothing, and if we didn't get back EDQUOT, - * then we must have run out of space - flush delalloc, and retry.. + * then we must have run out of space - flush all other inodes with + * delalloc blocks and retry without EOF preallocation. */ if (nimaps == 0) { xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, ip, offset, count); - if (xfs_flush_space(ip, &fsynced, &ioflag)) + if (flushed) return XFS_ERROR(ENOSPC); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_flush_inodes(ip); + xfs_ilock(ip, XFS_ILOCK_EXCL); + + flushed = 1; error = 0; + prealloc = 0; goto retry; } diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index a1cc1322fc0..fdcf7b82747 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -40,8 +40,7 @@ typedef enum { BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ - BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */ - BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */ + BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ } bmapi_flags_t; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index f76c6d7cea2..3750f04ede0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -562,9 +562,8 @@ xfs_log_mount( } mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); - if (!mp->m_log) { - cmn_err(CE_WARN, "XFS: Log allocation failed: No memory!"); - error = ENOMEM; + if (IS_ERR(mp->m_log)) { + error = -PTR_ERR(mp->m_log); goto out; } @@ -1180,10 +1179,13 @@ xlog_alloc_log(xfs_mount_t *mp, xfs_buf_t *bp; int i; int iclogsize; + int error = ENOMEM; log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL); - if (!log) - return NULL; + if (!log) { + xlog_warn("XFS: Log allocation failed: No memory!"); + goto out; + } log->l_mp = mp; log->l_targ = log_target; @@ -1201,19 +1203,35 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_grant_reserve_cycle = 1; log->l_grant_write_cycle = 1; + error = EFSCORRUPTED; if (xfs_sb_version_hassector(&mp->m_sb)) { log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT; - ASSERT(log->l_sectbb_log <= mp->m_sectbb_log); + if (log->l_sectbb_log < 0 || + log->l_sectbb_log > mp->m_sectbb_log) { + xlog_warn("XFS: Log sector size (0x%x) out of range.", + log->l_sectbb_log); + goto out_free_log; + } + /* for larger sector sizes, must have v2 or external log */ - ASSERT(log->l_sectbb_log == 0 || - log->l_logBBstart == 0 || - xfs_sb_version_haslogv2(&mp->m_sb)); - ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT); + if (log->l_sectbb_log != 0 && + (log->l_logBBstart != 0 && + !xfs_sb_version_haslogv2(&mp->m_sb))) { + xlog_warn("XFS: log 
sector size (0x%x) invalid " + "for configuration.", log->l_sectbb_log); + goto out_free_log; + } + if (mp->m_sb.sb_logsectlog < BBSHIFT) { + xlog_warn("XFS: Log sector log (0x%x) too small.", + mp->m_sb.sb_logsectlog); + goto out_free_log; + } } log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1; xlog_get_iclog_buffer_size(mp, log); + error = ENOMEM; bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp); if (!bp) goto out_free_log; @@ -1313,7 +1331,8 @@ out_free_iclog: xfs_buf_free(log->l_xbuf); out_free_log: kmem_free(log); - return NULL; +out: + return ERR_PTR(-error); } /* xlog_alloc_log */ @@ -2541,18 +2560,19 @@ redo: xlog_ins_ticketq(&log->l_reserve_headq, tic); xlog_trace_loggrant(log, tic, "xlog_grant_log_space: sleep 2"); + spin_unlock(&log->l_grant_lock); + xlog_grant_push_ail(log->l_mp, need_bytes); + spin_lock(&log->l_grant_lock); + XFS_STATS_INC(xs_sleep_logspace); sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); - if (XLOG_FORCED_SHUTDOWN(log)) { - spin_lock(&log->l_grant_lock); + spin_lock(&log->l_grant_lock); + if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - } xlog_trace_loggrant(log, tic, "xlog_grant_log_space: wake 2"); - xlog_grant_push_ail(log->l_mp, need_bytes); - spin_lock(&log->l_grant_lock); goto redo; } else if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_reserve_headq, tic); @@ -2631,7 +2651,7 @@ xlog_regrant_write_log_space(xlog_t *log, * for more free space, otherwise try to get some space for * this transaction. */ - + need_bytes = tic->t_unit_res; if ((ntic = log->l_write_headq)) { free_bytes = xlog_space_left(log, log->l_grant_write_cycle, log->l_grant_write_bytes); @@ -2651,26 +2671,25 @@ xlog_regrant_write_log_space(xlog_t *log, xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: sleep 1"); + spin_unlock(&log->l_grant_lock); + xlog_grant_push_ail(log->l_mp, need_bytes); + spin_lock(&log->l_grant_lock); + XFS_STATS_INC(xs_sleep_logspace); sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); /* If we're shutting down, this tic is already * off the queue */ - if (XLOG_FORCED_SHUTDOWN(log)) { - spin_lock(&log->l_grant_lock); + spin_lock(&log->l_grant_lock); + if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - } xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: wake 1"); - xlog_grant_push_ail(log->l_mp, tic->t_unit_res); - spin_lock(&log->l_grant_lock); } } - need_bytes = tic->t_unit_res; - redo: if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; @@ -2680,19 +2699,20 @@ redo: if (free_bytes < need_bytes) { if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) xlog_ins_ticketq(&log->l_write_headq, tic); + spin_unlock(&log->l_grant_lock); + xlog_grant_push_ail(log->l_mp, need_bytes); + spin_lock(&log->l_grant_lock); + XFS_STATS_INC(xs_sleep_logspace); sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); /* If we're shutting down, this tic is already off the queue */ - if (XLOG_FORCED_SHUTDOWN(log)) { - spin_lock(&log->l_grant_lock); + spin_lock(&log->l_grant_lock); + if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - } xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: wake 2"); - xlog_grant_push_ail(log->l_mp, need_bytes); - spin_lock(&log->l_grant_lock); goto redo; } else if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_write_headq, tic); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index b101990df02..65a99725d0c 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -291,14 +291,17 @@ xfs_mount_validate_sb( sbp->sb_sectsize > XFS_MAX_SECTORSIZE || sbp->sb_sectlog < 
XFS_MIN_SECTORSIZE_LOG || sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG || + sbp->sb_sectsize != (1 << sbp->sb_sectlog) || sbp->sb_blocksize < XFS_MIN_BLOCKSIZE || sbp->sb_blocksize > XFS_MAX_BLOCKSIZE || sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || + sbp->sb_blocksize != (1 << sbp->sb_blocklog) || sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || sbp->sb_inodelog < XFS_DINODE_MIN_LOG || sbp->sb_inodelog > XFS_DINODE_MAX_LOG || + sbp->sb_inodesize != (1 << sbp->sb_inodelog) || (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) || (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 7af44adffc8..d6a64392f98 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -313,7 +313,7 @@ typedef struct xfs_mount { #endif struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ struct task_struct *m_sync_task; /* generalised sync thread */ - bhv_vfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */ + xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */ struct list_head m_sync_list; /* sync thread work item list */ spinlock_t m_sync_lock; /* work item list lock */ int m_sync_seq; /* sync thread generation no. */ diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 7394c7af5de..19cf90a9c76 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -1457,6 +1457,13 @@ xfs_create( error = xfs_trans_reserve(tp, resblks, log_res, 0, XFS_TRANS_PERM_LOG_RES, log_count); if (error == ENOSPC) { + /* flush outstanding delalloc blocks and retry */ + xfs_flush_inodes(dp); + error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); + } + if (error == ENOSPC) { + /* No space at all so try a "no-allocation" reservation */ resblks = 0; error = xfs_trans_reserve(tp, 0, log_res, 0, XFS_TRANS_PERM_LOG_RES, log_count); |
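
Note: the xfs_sync.c hunks above replace xfs_flush_inode()/xfs_flush_device(), which queued work and then slept for a fixed 500ms, with xfs_flush_inodes(), which attaches an on-stack completion to the queued work item and waits for the xfssyncd thread to signal it. The sketch below shows only that queue-and-wait pattern in isolation; all names are illustrative, and it omits waking the worker thread (the real code wakes mp->m_sync_task after queueing).

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative work item, modelled on xfs_sync_work but simplified. */
struct demo_sync_work {
	struct list_head	w_list;
	void			(*w_syncer)(void *data);
	void			*w_data;
	struct completion	*w_completion;	/* NULL for fire-and-forget work */
};

/* Worker side: run one queued item and signal any waiter. */
static void demo_process_work(struct demo_sync_work *work)
{
	work->w_syncer(work->w_data);
	if (work->w_completion)
		complete(work->w_completion);
	kfree(work);
}

/* Caller side: queue the work and block until the worker has run it. */
static void demo_queue_and_wait(struct list_head *queue, spinlock_t *lock,
				void (*syncer)(void *), void *data)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct demo_sync_work *work;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;		/* sketch only: the real code uses KM_SLEEP and cannot fail */
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_completion = &done;

	spin_lock(lock);
	list_add_tail(&work->w_list, queue);
	spin_unlock(lock);

	/* A real caller would also wake the worker thread here. */
	wait_for_completion(&done);
}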