From f18df228997fb716990590d248663981a15f17d4 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Tue, 13 Jan 2009 16:43:09 +0100 Subject: quota: Add quota reservation support Delayed allocation defers the block allocation at the dirty pages flush-out time, doing quota charge/check at that time is too late. But we can't charge the quota blocks until blocks are really allocated, otherwise users could get overcharged after reboot from system crash. This patch adds quota reservation for delayed allocation. Quota blocks are reserved in memory, inode and quota won't gets dirtied until later block allocation time. Signed-off-by: Mingming Cao Signed-off-by: Jan Kara --- fs/dquot.c | 117 +++++++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 86 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index bca3cac4bee..9b1c4d3c9d8 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -899,6 +899,11 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_curspace += number; } +static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace += number; +} + static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || @@ -1068,7 +1073,11 @@ err_out: kfree_skb(skb); } #endif - +/* + * Write warnings to the console and send warning messages over netlink. + * + * Note that this function can sleep. + */ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) { int i; @@ -1129,13 +1138,18 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) /* needs dq_data_lock */ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { + qsize_t tspace; + *warntype = QUOTA_NL_NOWARN; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; + tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace + + space; + if (dquot->dq_dqb.dqb_bhardlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit && + tspace > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = QUOTA_NL_BHARDWARN; @@ -1143,7 +1157,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (!prealloc) @@ -1152,7 +1166,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = QUOTA_NL_BSOFTWARN; @@ -1306,51 +1320,92 @@ void vfs_dq_drop(struct inode *inode) /* * This operation can block, but only after everything is updated */ -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +int __dquot_alloc_space(struct inode *inode, qsize_t number, + int warn, int reserve) { - int cnt, ret = NO_QUOTA; + int cnt, ret = QUOTA_OK; char warntype[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) { -out_add: - inode_add_bytes(inode, 
number); - return QUOTA_OK; - } for (cnt = 0; cnt < MAXQUOTAS; cnt++) warntype[cnt] = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */ - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto out_add; - } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (inode->i_dquot[cnt] == NODQUOT) continue; - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) - goto warn_put_all; + if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) + == NO_QUOTA) { + ret = NO_QUOTA; + goto out_unlock; + } } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (inode->i_dquot[cnt] == NODQUOT) continue; - dquot_incr_space(inode->i_dquot[cnt], number); + if (reserve) + dquot_resv_space(inode->i_dquot[cnt], number); + else + dquot_incr_space(inode->i_dquot[cnt], number); } - inode_add_bytes(inode, number); - ret = QUOTA_OK; -warn_put_all: + if (!reserve) + inode_add_bytes(inode, number); +out_unlock: spin_unlock(&dq_data_lock); - if (ret == QUOTA_OK) - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); flush_warnings(inode->i_dquot, warntype); + return ret; +} + +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +{ + int cnt, ret = QUOTA_OK; + + /* + * First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex + */ + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out_unlock; + } + + ret = __dquot_alloc_space(inode, number, warn, 0); + if (ret == NO_QUOTA) + goto out_unlock; + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); +out_unlock: up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} + +int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) +{ + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + ret = __dquot_alloc_space(inode, number, warn, 1); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: return ret; } +EXPORT_SYMBOL(dquot_reserve_space); /* * This operation can block, but only after everything is updated @@ -2057,7 +2112,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_lock(&dq_data_lock); di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); - di->dqb_curspace = dm->dqb_curspace; + di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; di->dqb_ihardlimit = dm->dqb_ihardlimit; di->dqb_isoftlimit = dm->dqb_isoftlimit; di->dqb_curinodes = dm->dqb_curinodes; @@ -2097,7 +2152,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_lock(&dq_data_lock); if (di->dqb_valid & QIF_SPACE) { - dm->dqb_curspace = di->dqb_curspace; + dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; check_blim = 1; __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } -- cgit v1.2.3-70-g09d2 From 740d9dcd949a986c88886a591054a0cdb89ef669 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Tue, 13 Jan 2009 16:43:14 +0100 Subject: quota: Add quota reservation claim and released operations Reserved quota will be 
claimed at the block allocation time. Over-booked quota could be returned back with the release callback function. Signed-off-by: Mingming Cao Signed-off-by: Jan Kara --- fs/dquot.c | 110 +++++++++++++++++++++++++++++++++++++++++++++-- include/linux/quota.h | 6 +++ include/linux/quotaops.h | 53 +++++++++++++++++++++++ 3 files changed, 165 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 9b1c4d3c9d8..2916f91ca40 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -904,6 +904,23 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_rsvspace += number; } +/* + * Claim reserved quota space + */ +static void dquot_claim_reserved_space(struct dquot *dquot, + qsize_t number) +{ + WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); + dquot->dq_dqb.dqb_curspace += number; + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline +void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace -= number; +} + static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || @@ -1452,6 +1469,72 @@ warn_put_all: return ret; } +int dquot_claim_space(struct inode *inode, qsize_t number) +{ + int cnt; + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + inode_add_bytes(inode, number); + goto out; + } + + spin_lock(&dq_data_lock); + /* Claim reserved quotas to allocated quotas */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_claim_reserved_space(inode->i_dquot[cnt], + number); + } + /* Update inode bytes */ + inode_add_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_claim_space); + +/* + * Release reserved quota space + */ +void dquot_release_reserved_space(struct inode *inode, qsize_t number) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + spin_lock(&dq_data_lock); + /* Release reserved dquots */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_free_reserved_space(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return; +} +EXPORT_SYMBOL(dquot_release_reserved_space); + /* * This operation can block, but only after everything is updated */ @@ -1528,6 +1611,19 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) return QUOTA_OK; } +/* + * call back function, get reserved quota space from underlying fs + */ +qsize_t dquot_get_reserved_space(struct inode *inode) +{ + qsize_t reserved_space = 0; + + if (sb_any_quota_active(inode->i_sb) && + inode->i_sb->dq_op->get_reserved_space) + reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); + return reserved_space; +} + /* * Transfer the number of inode and blocks from one diskquota to an other. 
* @@ -1536,7 +1632,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) */ int dquot_transfer(struct inode *inode, struct iattr *iattr) { - qsize_t space; + qsize_t space, cur_space; + qsize_t rsv_space = 0; struct dquot *transfer_from[MAXQUOTAS]; struct dquot *transfer_to[MAXQUOTAS]; int cnt, ret = QUOTA_OK; @@ -1575,7 +1672,9 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) goto put_all; } spin_lock(&dq_data_lock); - space = inode_get_bytes(inode); + cur_space = inode_get_bytes(inode); + rsv_space = dquot_get_reserved_space(inode); + space = cur_space + rsv_space; /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (transfer_to[cnt] == NODQUOT) @@ -1604,11 +1703,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) warntype_from_space[cnt] = info_bdq_free(transfer_from[cnt], space); dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_space(transfer_from[cnt], space); + dquot_decr_space(transfer_from[cnt], cur_space); + dquot_free_reserved_space(transfer_from[cnt], + rsv_space); } dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_space(transfer_to[cnt], space); + dquot_incr_space(transfer_to[cnt], cur_space); + dquot_resv_space(transfer_to[cnt], rsv_space); inode->i_dquot[cnt] = transfer_to[cnt]; } diff --git a/include/linux/quota.h b/include/linux/quota.h index 54b837fa64f..a510d91561f 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -311,6 +311,12 @@ struct dquot_operations { int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ /* reserve quota for delayed block allocation */ int (*reserve_space) (struct inode *, qsize_t, int); + /* claim reserved quota for delayed alloc */ + int (*claim_space) (struct inode *, qsize_t); + /* release rsved quota for delayed alloc */ + void (*release_rsv) (struct inode *, qsize_t); + /* get reserved quota for delayed alloc */ + qsize_t (*get_reserved_space) (struct inode *); }; /* Operations handling requests from userspace */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 3e3a0d2874d..7369d04e0a8 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot); int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); int dquot_alloc_inode(const struct inode *inode, qsize_t number); +int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc); +int dquot_claim_space(struct inode *inode, qsize_t number); +void dquot_release_reserved_space(struct inode *inode, qsize_t number); +qsize_t dquot_get_reserved_space(struct inode *inode); + int dquot_free_space(struct inode *inode, qsize_t number); int dquot_free_inode(const struct inode *inode, qsize_t number); @@ -203,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode) return 0; } +/* + * Convert in-memory reserved quotas to real consumed quotas + */ +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) { + if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) + return 1; + } else + inode_add_bytes(inode, nr); + + mark_inode_dirty(inode); + return 0; +} + +/* + * Release reserved (in-memory) quotas + */ +static inline +void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) + inode->i_sb->dq_op->release_rsv(inode, nr); +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { if 
(sb_any_quota_active(inode->i_sb)) @@ -354,6 +384,17 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) return 0; } +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + return vfs_dq_alloc_space(inode, nr); +} + +static inline +int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + return 0; +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); @@ -397,6 +438,18 @@ static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr) nr << inode->i_blkbits); } +static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr) +{ + return vfs_dq_claim_space(inode, + nr << inode->i_blkbits); +} + +static inline +void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr) +{ + vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits); +} + static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); -- cgit v1.2.3-70-g09d2 From 08d0350ce9d63b19b62498c7b6421c2a95246b95 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Wed, 14 Jan 2009 16:19:32 +0100 Subject: quota: Move EXPORT_SYMBOL immediately next to the functions/varibles According to checkpatch: EXPORT_SYMBOL(foo); should immediately follow its function/variable Signed-off-by: Mingming Cao Signed-off-by: Jan Kara --- fs/dquot.c | 69 ++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 33 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/dquot.c b/fs/dquot.c index 2916f91ca40..28aa1466760 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -132,6 +132,7 @@ static DEFINE_SPINLOCK(dq_list_lock); static DEFINE_SPINLOCK(dq_state_lock); DEFINE_SPINLOCK(dq_data_lock); +EXPORT_SYMBOL(dq_data_lock); static char *quotatypes[] = INITQFNAMES; static struct quota_format_type *quota_formats; /* List of registered formats */ @@ -148,6 +149,7 @@ int register_quota_format(struct quota_format_type *fmt) spin_unlock(&dq_list_lock); return 0; } +EXPORT_SYMBOL(register_quota_format); void unregister_quota_format(struct quota_format_type *fmt) { @@ -159,6 +161,7 @@ void unregister_quota_format(struct quota_format_type *fmt) *actqf = (*actqf)->qf_next; spin_unlock(&dq_list_lock); } +EXPORT_SYMBOL(unregister_quota_format); static struct quota_format_type *find_quota_format(int id) { @@ -215,6 +218,7 @@ static unsigned int dq_hash_bits, dq_hash_mask; static struct hlist_head *dquot_hash; struct dqstats dqstats; +EXPORT_SYMBOL(dqstats); static inline unsigned int hashfn(const struct super_block *sb, unsigned int id, int type) @@ -309,6 +313,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) spin_unlock(&dq_list_lock); return 0; } +EXPORT_SYMBOL(dquot_mark_dquot_dirty); /* This function needs dq_list_lock */ static inline int clear_dquot_dirty(struct dquot *dquot) @@ -360,6 +365,7 @@ out_iolock: mutex_unlock(&dquot->dq_lock); return ret; } +EXPORT_SYMBOL(dquot_acquire); /* * Write dquot to disk @@ -389,6 +395,7 @@ out_sem: mutex_unlock(&dqopt->dqio_mutex); return ret; } +EXPORT_SYMBOL(dquot_commit); /* * Release dquot @@ -417,6 +424,7 @@ out_dqlock: mutex_unlock(&dquot->dq_lock); return ret; } +EXPORT_SYMBOL(dquot_release); void dquot_destroy(struct dquot *dquot) { @@ -516,6 +524,7 @@ out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return ret; } +EXPORT_SYMBOL(dquot_scan_active); int vfs_quota_sync(struct super_block *sb, int type) { @@ -563,6 +572,7 @@ int vfs_quota_sync(struct super_block *sb, 
int type) return 0; } +EXPORT_SYMBOL(vfs_quota_sync); /* Free unused dquots from cache */ static void prune_dqcache(int count) @@ -672,6 +682,7 @@ we_slept: put_dquot_last(dquot); spin_unlock(&dq_list_lock); } +EXPORT_SYMBOL(dqput); struct dquot *dquot_alloc(struct super_block *sb, int type) { @@ -767,6 +778,7 @@ out: return dquot; } +EXPORT_SYMBOL(dqget); static int dqinit_needed(struct inode *inode, int type) { @@ -1282,6 +1294,7 @@ out_err: dqput(got[cnt]); return ret; } +EXPORT_SYMBOL(dquot_initialize); /* * Release all quotas referenced by inode @@ -1302,6 +1315,7 @@ int dquot_drop(struct inode *inode) dqput(put[cnt]); return 0; } +EXPORT_SYMBOL(dquot_drop); /* Wrapper to remove references to quota structures from inode */ void vfs_dq_drop(struct inode *inode) @@ -1324,6 +1338,7 @@ void vfs_dq_drop(struct inode *inode) inode->i_sb->dq_op->drop(inode); } } +EXPORT_SYMBOL(vfs_dq_drop); /* * Following four functions update i_blocks+i_bytes fields and @@ -1404,6 +1419,7 @@ out_unlock: out: return ret; } +EXPORT_SYMBOL(dquot_alloc_space); int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) { @@ -1468,6 +1484,7 @@ warn_put_all: up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return ret; } +EXPORT_SYMBOL(dquot_alloc_inode); int dquot_claim_space(struct inode *inode, qsize_t number) { @@ -1574,6 +1591,7 @@ out_sub: up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return QUOTA_OK; } +EXPORT_SYMBOL(dquot_free_space); /* * This operation can block, but only after everything is updated @@ -1610,6 +1628,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return QUOTA_OK; } +EXPORT_SYMBOL(dquot_free_inode); /* * call back function, get reserved quota space from underlying fs @@ -1746,6 +1765,7 @@ over_quota: ret = NO_QUOTA; goto warn_put_all; } +EXPORT_SYMBOL(dquot_transfer); /* Wrapper for transferring ownership of an inode */ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) @@ -1757,7 +1777,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) } return 0; } - +EXPORT_SYMBOL(vfs_dq_transfer); /* * Write info of quota file to disk @@ -1772,6 +1792,7 @@ int dquot_commit_info(struct super_block *sb, int type) mutex_unlock(&dqopt->dqio_mutex); return ret; } +EXPORT_SYMBOL(dquot_commit_info); /* * Definitions of diskquota operations. @@ -1924,13 +1945,14 @@ put_inodes: } return ret; } +EXPORT_SYMBOL(vfs_quota_disable); int vfs_quota_off(struct super_block *sb, int type, int remount) { return vfs_quota_disable(sb, type, remount ? 
DQUOT_SUSPENDED : (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); } - +EXPORT_SYMBOL(vfs_quota_off); /* * Turn quotas on on a device */ @@ -2087,6 +2109,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id, DQUOT_LIMITS_ENABLED); return error; } +EXPORT_SYMBOL(vfs_quota_on_path); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, int remount) @@ -2104,6 +2127,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, } return error; } +EXPORT_SYMBOL(vfs_quota_on); /* * More powerful function for turning on quotas allowing setting @@ -2150,6 +2174,7 @@ out_lock: load_quota: return vfs_load_quota_inode(inode, type, format_id, flags); } +EXPORT_SYMBOL(vfs_quota_enable); /* * This function is used when filesystem needs to initialize quotas @@ -2179,6 +2204,7 @@ out: dput(dentry); return error; } +EXPORT_SYMBOL(vfs_quota_on_mount); /* Wrapper to turn on quotas when remounting rw */ int vfs_dq_quota_on_remount(struct super_block *sb) @@ -2195,6 +2221,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb) } return ret; } +EXPORT_SYMBOL(vfs_dq_quota_on_remount); static inline qsize_t qbtos(qsize_t blocks) { @@ -2236,6 +2263,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d return 0; } +EXPORT_SYMBOL(vfs_get_dqblk); /* Generic routine for setting common part of quota structure */ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) @@ -2327,6 +2355,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d out: return rc; } +EXPORT_SYMBOL(vfs_set_dqblk); /* Generic routine for getting common part of quota file information */ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) @@ -2348,6 +2377,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } +EXPORT_SYMBOL(vfs_get_dqinfo); /* Generic routine for setting common part of quota file information */ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) @@ -2376,6 +2406,7 @@ out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return err; } +EXPORT_SYMBOL(vfs_set_dqinfo); struct quotactl_ops vfs_quotactl_ops = { .quota_on = vfs_quota_on, @@ -2531,37 +2562,3 @@ static int __init dquot_init(void) return 0; } module_init(dquot_init); - -EXPORT_SYMBOL(register_quota_format); -EXPORT_SYMBOL(unregister_quota_format); -EXPORT_SYMBOL(dqstats); -EXPORT_SYMBOL(dq_data_lock); -EXPORT_SYMBOL(vfs_quota_enable); -EXPORT_SYMBOL(vfs_quota_on); -EXPORT_SYMBOL(vfs_quota_on_path); -EXPORT_SYMBOL(vfs_quota_on_mount); -EXPORT_SYMBOL(vfs_quota_disable); -EXPORT_SYMBOL(vfs_quota_off); -EXPORT_SYMBOL(dquot_scan_active); -EXPORT_SYMBOL(vfs_quota_sync); -EXPORT_SYMBOL(vfs_get_dqinfo); -EXPORT_SYMBOL(vfs_set_dqinfo); -EXPORT_SYMBOL(vfs_get_dqblk); -EXPORT_SYMBOL(vfs_set_dqblk); -EXPORT_SYMBOL(dquot_commit); -EXPORT_SYMBOL(dquot_commit_info); -EXPORT_SYMBOL(dquot_acquire); -EXPORT_SYMBOL(dquot_release); -EXPORT_SYMBOL(dquot_mark_dquot_dirty); -EXPORT_SYMBOL(dquot_initialize); -EXPORT_SYMBOL(dquot_drop); -EXPORT_SYMBOL(vfs_dq_drop); -EXPORT_SYMBOL(dqget); -EXPORT_SYMBOL(dqput); -EXPORT_SYMBOL(dquot_alloc_space); -EXPORT_SYMBOL(dquot_alloc_inode); -EXPORT_SYMBOL(dquot_free_space); -EXPORT_SYMBOL(dquot_free_inode); -EXPORT_SYMBOL(dquot_transfer); -EXPORT_SYMBOL(vfs_dq_transfer); -EXPORT_SYMBOL(vfs_dq_quota_on_remount); -- cgit v1.2.3-70-g09d2 From a219ce3748bbc596cec85c44754b3f6b994f1e1d Mon Sep 17 00:00:00 2001 
From: Jan Kara Date: Mon, 12 Jan 2009 19:03:19 +0100 Subject: ext3: Remove unnecessary quota functions ext3_dquot_initialize() and ext3_dquot_drop() is no longer needed because of modified quota locking. Signed-off-by: Jan Kara --- fs/ext3/super.c | 44 ++------------------------------------------ 1 file changed, 2 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 4a970411a45..41e6ae605e0 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) -static int ext3_dquot_initialize(struct inode *inode, int type); -static int ext3_dquot_drop(struct inode *inode); static int ext3_write_dquot(struct dquot *dquot); static int ext3_acquire_dquot(struct dquot *dquot); static int ext3_release_dquot(struct dquot *dquot); @@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static struct dquot_operations ext3_quota_operations = { - .initialize = ext3_dquot_initialize, - .drop = ext3_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot) return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; } -static int ext3_dquot_initialize(struct inode *inode, int type) -{ - handle_t *handle; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = dquot_initialize(inode, type); - err = ext3_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - -static int ext3_dquot_drop(struct inode *inode) -{ - handle_t *handle; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) { - /* - * We call dquot_drop() anyway to at least release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - return PTR_ERR(handle); - } - ret = dquot_drop(inode); - err = ext3_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - static int ext3_write_dquot(struct dquot *dquot) { int ret, err; -- cgit v1.2.3-70-g09d2 From edf7245362f7b8b8c76c4a6cad3604bf80884848 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Jan 2009 19:05:26 +0100 Subject: ext4: Remove unnecessary quota functions ext4_dquot_initialize() and ext4_dquot_drop() is no longer needed because of modified quota locking. Signed-off-by: Jan Kara --- fs/ext4/super.c | 44 ++------------------------------------------ 1 file changed, 2 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 39d1993cfa1..41f879497f9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_ #define QTYPE2NAME(t) ((t) == USRQUOTA ? 
"user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) -static int ext4_dquot_initialize(struct inode *inode, int type); -static int ext4_dquot_drop(struct inode *inode); static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); @@ -942,8 +940,8 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static struct dquot_operations ext4_quota_operations = { - .initialize = ext4_dquot_initialize, - .drop = ext4_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -3380,44 +3378,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot) return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; } -static int ext4_dquot_initialize(struct inode *inode, int type) -{ - handle_t *handle; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = dquot_initialize(inode, type); - err = ext4_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - -static int ext4_dquot_drop(struct inode *inode) -{ - handle_t *handle; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) { - /* - * We call dquot_drop() anyway to at least release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - return PTR_ERR(handle); - } - ret = dquot_drop(inode); - err = ext4_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - static int ext4_write_dquot(struct dquot *dquot) { int ret, err; -- cgit v1.2.3-70-g09d2 From 643d00ccc311664188c8209bf8b596a30e139c3a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Jan 2009 19:07:14 +0100 Subject: reiserfs: Remove unnecessary quota functions reiserfs_dquot_initialize() and reiserfs_dquot_drop() is no longer needed because of modified quota locking. 
Signed-off-by: Jan Kara --- fs/reiserfs/super.c | 58 ++--------------------------------------------------- 1 file changed, 2 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index f3c820b7582..317c0352163 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = { #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") -static int reiserfs_dquot_initialize(struct inode *, int); -static int reiserfs_dquot_drop(struct inode *); static int reiserfs_write_dquot(struct dquot *); static int reiserfs_acquire_dquot(struct dquot *); static int reiserfs_release_dquot(struct dquot *); @@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int); static int reiserfs_quota_on(struct super_block *, int, int, char *, int); static struct dquot_operations reiserfs_quota_operations = { - .initialize = reiserfs_dquot_initialize, - .drop = reiserfs_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf) } #ifdef CONFIG_QUOTA -static int reiserfs_dquot_initialize(struct inode *inode, int type) -{ - struct reiserfs_transaction_handle th; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - reiserfs_write_lock(inode->i_sb); - ret = - journal_begin(&th, inode->i_sb, - 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (ret) - goto out; - ret = dquot_initialize(inode, type); - err = - journal_end(&th, inode->i_sb, - 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (!ret && err) - ret = err; - out: - reiserfs_write_unlock(inode->i_sb); - return ret; -} - -static int reiserfs_dquot_drop(struct inode *inode) -{ - struct reiserfs_transaction_handle th; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - reiserfs_write_lock(inode->i_sb); - ret = - journal_begin(&th, inode->i_sb, - 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (ret) { - /* - * We call dquot_drop() anyway to at least release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - goto out; - } - ret = dquot_drop(inode); - err = - journal_end(&th, inode->i_sb, - 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (!ret && err) - ret = err; - out: - reiserfs_write_unlock(inode->i_sb); - return ret; -} - static int reiserfs_write_dquot(struct dquot *dquot) { struct reiserfs_transaction_handle th; -- cgit v1.2.3-70-g09d2 From 60e58e0f30e723464c2a7d34b71b8675566c572d Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Thu, 22 Jan 2009 18:13:05 +0100 Subject: ext4: quota reservation for delayed allocation Uses quota reservation/claim/release to handle quota properly for delayed allocation in the three steps: 1) quotas are reserved when data being copied to cache when block allocation is defered 2) when new blocks are allocated. reserved quotas are converted to the real allocated quota, 2) over-booked quotas for metadata blocks are released back. 
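
A minimal sketch (for illustration only, not part of the patch) of how a delayed-allocation filesystem is expected to drive the reservation API introduced in this series, following the three steps described above. The myfs_* names are hypothetical; vfs_dq_reserve_block(), vfs_dq_claim_block() and vfs_dq_release_reservation_block() are the quotaops.h helpers added earlier in the series. Error handling and filesystem locking are omitted.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Step 1: reserve quota (in memory only) when data is copied into the
 * page cache and block allocation is deferred. */
static int myfs_da_reserve_blocks(struct inode *inode, qsize_t nr_blocks)
{
	if (vfs_dq_reserve_block(inode, nr_blocks))
		return -EDQUOT;	/* over quota - fail the write early */
	return 0;
}

/* Steps 2 and 3: at block allocation time, convert the reservation for
 * the blocks that were really allocated into charged quota, and give back
 * the over-booked part of the reservation (e.g. unused metadata blocks). */
static void myfs_da_blocks_allocated(struct inode *inode, qsize_t allocated,
				     qsize_t unused_reservation)
{
	vfs_dq_claim_block(inode, allocated);
	if (unused_reservation)
		vfs_dq_release_reservation_block(inode, unused_reservation);
}

This mirrors what the ext4 hunks below do in ext4_da_reserve_space(), ext4_mb_mark_diskspace_used() and ext4_da_update_reserve_space()/ext4_da_release_space().
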
Signed-off-by: Mingming Cao Acked-by: "Theodore Ts'o" Signed-off-by: Jan Kara --- fs/ext4/ext4.h | 2 ++ fs/ext4/inode.c | 36 +++++++++++++++++++++++++++++++++--- fs/ext4/mballoc.c | 44 ++++++++++++++++++++++++++------------------ fs/ext4/super.c | 4 ++++ 4 files changed, 65 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b0c87dce66a..6083bb38057 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -20,6 +20,7 @@ #include #include #include +#include #include "ext4_i.h" /* @@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); extern int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page); +extern qsize_t ext4_get_reserved_space(struct inode *inode); /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c7fed5b1874..8290cfbd9fa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -975,6 +975,17 @@ out: return err; } +qsize_t ext4_get_reserved_space(struct inode *inode) +{ + unsigned long long total; + + spin_lock(&EXT4_I(inode)->i_block_reservation_lock); + total = EXT4_I(inode)->i_reserved_data_blocks + + EXT4_I(inode)->i_reserved_meta_blocks; + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + return total; +} /* * Calculate the number of metadata blocks need to reserve * to allocate @blocks for non extent file based file @@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) /* update per-inode reservations */ BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); EXT4_I(inode)->i_reserved_data_blocks -= used; - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + /* + * free those over-booking quota for metadata blocks + */ + + if (mdb_free) + vfs_dq_release_reservation_block(inode, mdb_free); } /* @@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file, static int ext4_da_reserve_space(struct inode *inode, int nrblocks) { int retries = 0; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - unsigned long md_needed, mdblocks, total = 0; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + unsigned long md_needed, mdblocks, total = 0; /* * recalculate the amount of metadata blocks to reserve @@ -1570,12 +1587,23 @@ repeat: md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; total = md_needed + nrblocks; + /* + * Make quota reservation here to prevent quota overflow + * later. Real quota accounting is done at pages writeout + * time. 
+ */ + if (vfs_dq_reserve_block(inode, total)) { + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + return -EDQUOT; + } + if (ext4_claim_free_blocks(sbi, total)) { spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { yield(); goto repeat; } + vfs_dq_release_reservation_block(inode, total); return -ENOSPC; } EXT4_I(inode)->i_reserved_data_blocks += nrblocks; @@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free) BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); EXT4_I(inode)->i_reserved_meta_blocks = mdb; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + vfs_dq_release_reservation_block(inode, release); } static void ext4_da_page_release_reservation(struct page *page, diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 9f61e62f435..4de42090c41 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); - else + else { percpu_counter_sub(&sbi->s_dirtyblocks_counter, ac->ac_b_ex.fe_len); + /* convert reserved quota blocks to real quota blocks */ + vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len); + } if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, @@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; - unsigned int inquota; + unsigned int inquota = 0; unsigned int reserv_blks = 0; sb = ar->inode->i_sb; @@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, (unsigned long long) ar->pleft, (unsigned long long) ar->pright); - if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { - /* - * With delalloc we already reserved the blocks + /* + * For delayed allocation, we could skip the ENOSPC and + * EDQUOT check, as blocks and quotas have been already + * reserved when data being copied into pagecache. + */ + if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) + ar->flags |= EXT4_MB_DELALLOC_RESERVED; + else { + /* Without delayed allocation we need to verify + * there is enough free blocks to do block allocation + * and verify allocation doesn't exceed the quota limits. 
*/ while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { /* let others to free the space */ @@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, return 0; } reserv_blks = ar->len; + while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { + ar->flags |= EXT4_MB_HINT_NOPREALLOC; + ar->len--; + } + inquota = ar->len; + if (ar->len == 0) { + *errp = -EDQUOT; + goto out3; + } } - while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { - ar->flags |= EXT4_MB_HINT_NOPREALLOC; - ar->len--; - } - if (ar->len == 0) { - *errp = -EDQUOT; - goto out3; - } - inquota = ar->len; - - if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) - ar->flags |= EXT4_MB_DELALLOC_RESERVED; ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { @@ -4654,7 +4662,7 @@ repeat: out2: kmem_cache_free(ext4_ac_cachep, ac); out1: - if (ar->len < inquota) + if (inquota && ar->len < inquota) DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); out3: if (!ar->len) { diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 41f879497f9..5a238e9c71c 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -943,6 +943,10 @@ static struct dquot_operations ext4_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, + .reserve_space = dquot_reserve_space, + .claim_space = dquot_claim_space, + .release_rsv = dquot_release_reserved_space, + .get_reserved_space = ext4_get_reserved_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, .free_inode = dquot_free_inode, -- cgit v1.2.3-70-g09d2 From 884d179dff3aa98a73c3ba9dee05fd6050d664f0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 15:28:09 +0100 Subject: quota: Move quota files into separate directory Quota subsystem has more and more files. It's time to create a dir for it. Signed-off-by: Jan Kara --- fs/Kconfig | 56 +- fs/Makefile | 6 +- fs/dquot.c | 2564 ------------------------------------------------- fs/quota.c | 513 ---------- fs/quota/Kconfig | 59 ++ fs/quota/Makefile | 14 + fs/quota/dquot.c | 2564 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/quota/quota.c | 513 ++++++++++ fs/quota/quota_tree.c | 645 +++++++++++++ fs/quota/quota_tree.h | 25 + fs/quota/quota_v1.c | 218 +++++ fs/quota/quota_v2.c | 236 +++++ fs/quota/quotaio_v1.h | 33 + fs/quota/quotaio_v2.h | 60 ++ fs/quota_tree.c | 645 ------------- fs/quota_tree.h | 25 - fs/quota_v1.c | 218 ----- fs/quota_v2.c | 236 ----- fs/quotaio_v1.h | 33 - fs/quotaio_v2.h | 60 -- 20 files changed, 4369 insertions(+), 4354 deletions(-) delete mode 100644 fs/dquot.c delete mode 100644 fs/quota.c create mode 100644 fs/quota/Kconfig create mode 100644 fs/quota/Makefile create mode 100644 fs/quota/dquot.c create mode 100644 fs/quota/quota.c create mode 100644 fs/quota/quota_tree.c create mode 100644 fs/quota/quota_tree.h create mode 100644 fs/quota/quota_v1.c create mode 100644 fs/quota/quota_v2.c create mode 100644 fs/quota/quotaio_v1.h create mode 100644 fs/quota/quotaio_v2.h delete mode 100644 fs/quota_tree.c delete mode 100644 fs/quota_tree.h delete mode 100644 fs/quota_v1.c delete mode 100644 fs/quota_v2.c delete mode 100644 fs/quotaio_v1.h delete mode 100644 fs/quotaio_v2.h (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index 93945dd0b1a..cef8b18ceaa 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -56,61 +56,7 @@ endif # BLOCK source "fs/notify/Kconfig" -config QUOTA - bool "Quota support" - help - If you say Y here, you will be able to set per user limits for disk - usage (also called disk quotas). 
Currently, it works for the - ext2, ext3, and reiserfs file system. ext3 also supports journalled - quotas for which you don't need to run quotacheck(8) after an unclean - shutdown. - For further details, read the Quota mini-HOWTO, available from - , or the documentation provided - with the quota tools. Probably the quota support is only useful for - multi user systems. If unsure, say N. - -config QUOTA_NETLINK_INTERFACE - bool "Report quota messages through netlink interface" - depends on QUOTA && NET - help - If you say Y here, quota warnings (about exceeding softlimit, reaching - hardlimit, etc.) will be reported through netlink interface. If unsure, - say Y. - -config PRINT_QUOTA_WARNING - bool "Print quota warnings to console (OBSOLETE)" - depends on QUOTA - default y - help - If you say Y here, quota warnings (about exceeding softlimit, reaching - hardlimit, etc.) will be printed to the process' controlling terminal. - Note that this behavior is currently deprecated and may go away in - future. Please use notification via netlink socket instead. - -# Generic support for tree structured quota files. Seleted when needed. -config QUOTA_TREE - tristate - -config QFMT_V1 - tristate "Old quota format support" - depends on QUOTA - help - This quota format was (is) used by kernels earlier than 2.4.22. If - you have quota working and you don't want to convert to new quota - format say Y here. - -config QFMT_V2 - tristate "Quota format v2 support" - depends on QUOTA - select QUOTA_TREE - help - This quota format allows using quotas with 32-bit UIDs/GIDs. If you - need this functionality say Y here. - -config QUOTACTL - bool - depends on XFS_QUOTA || QUOTA - default y +source "fs/quota/Kconfig" source "fs/autofs/Kconfig" source "fs/autofs4/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index dc20db34867..6e82a307bcd 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o obj-$(CONFIG_NFS_COMMON) += nfs_common/ obj-$(CONFIG_GENERIC_ACL) += generic_acl.o -obj-$(CONFIG_QUOTA) += dquot.o -obj-$(CONFIG_QFMT_V1) += quota_v1.o -obj-$(CONFIG_QFMT_V2) += quota_v2.o -obj-$(CONFIG_QUOTA_TREE) += quota_tree.o -obj-$(CONFIG_QUOTACTL) += quota.o +obj-y += quota/ obj-$(CONFIG_PROC_FS) += proc/ obj-y += partitions/ diff --git a/fs/dquot.c b/fs/dquot.c deleted file mode 100644 index 28aa1466760..00000000000 --- a/fs/dquot.c +++ /dev/null @@ -1,2564 +0,0 @@ -/* - * Implementation of the diskquota system for the LINUX operating system. QUOTA - * is implemented using the BSD system call interface as the means of - * communication with the user level. This file contains the generic routines - * called by the different filesystems on allocation of an inode or block. - * These routines take care of the administration needed to have a consistent - * diskquota tracking system. The ideas of both user and group quotas are based - * on the Melbourne quota system as used on BSD derived systems. The internal - * implementation is based on one of the several variants of the LINUX - * inode-subsystem with added complexity of the diskquota system. - * - * Author: Marco van Wieringen - * - * Fixes: Dmitry Gorodchanin , 11 Feb 96 - * - * Revised list management to avoid races - * -- Bill Hawes, , 9/98 - * - * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). - * As the consequence the locking was moved from dquot_decr_...(), - * dquot_incr_...() to calling functions. - * invalidate_dquots() now writes modified dquots. 
- * Serialized quota_off() and quota_on() for mount point. - * Fixed a few bugs in grow_dquots(). - * Fixed deadlock in write_dquot() - we no longer account quotas on - * quota files - * remove_dquot_ref() moved to inode.c - it now traverses through inodes - * add_dquot_ref() restarts after blocking - * Added check for bogus uid and fixed check for group in quotactl. - * Jan Kara, , sponsored by SuSE CR, 10-11/99 - * - * Used struct list_head instead of own list struct - * Invalidation of referenced dquots is no longer possible - * Improved free_dquots list management - * Quota and i_blocks are now updated in one place to avoid races - * Warnings are now delayed so we won't block in critical section - * Write updated not to require dquot lock - * Jan Kara, , 9/2000 - * - * Added dynamic quota structure allocation - * Jan Kara 12/2000 - * - * Rewritten quota interface. Implemented new quota format and - * formats registering. - * Jan Kara, , 2001,2002 - * - * New SMP locking. - * Jan Kara, , 10/2002 - * - * Added journalled quota support, fix lock inversion problems - * Jan Kara, , 2003,2004 - * - * (C) Copyright 1994 - 1997 Marco van Wieringen - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for inode_lock, oddly enough.. */ -#ifdef CONFIG_QUOTA_NETLINK_INTERFACE -#include -#include -#endif - -#include - -#define __DQUOT_PARANOIA - -/* - * There are three quota SMP locks. dq_list_lock protects all lists with quotas - * and quota formats, dqstats structure containing statistics about the lists - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects - * modifications of quota state (on quotaon and quotaoff) and readers who care - * about latest values take it as well. - * - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, - * dq_list_lock > dq_state_lock - * - * Note that some things (eg. sb pointer, type, id) doesn't change during - * the life of the dquot structure and so needn't to be protected by a lock - * - * Any operation working on dquots via inode pointers must hold dqptr_sem. If - * operation is just reading pointers from inode (or not using them at all) the - * read lock is enough. If pointers are altered function must hold write lock - * (these locking rules also apply for S_NOQUOTA flag in the inode - note that - * for altering the flag i_mutex is also needed). - * - * Each dquot has its dq_lock mutex. Locked dquots might not be referenced - * from inodes (dquot_alloc_space() and such don't check the dq_lock). - * Currently dquot is locked only when it is being read to memory (or space for - * it is being allocated) on the first dqget() and when it is being released on - * the last dqput(). The allocation and release oparations are serialized by - * the dq_lock and by checking the use count in dquot_release(). Write - * operations on dquots don't hold dq_lock as they copy data under dq_data_lock - * spinlock to internal buffers before writing. 
- * - * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > - * dqio_mutex - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > - * dqptr_sem. But filesystem has to count with the fact that functions such as - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called - * from inside a transaction to keep filesystem consistency after a crash. Also - * filesystems usually want to do some IO on dquot from ->mark_dirty which is - * called with dqptr_sem held. - * i_mutex on quota files is special (it's below dqio_mutex) - */ - -static DEFINE_SPINLOCK(dq_list_lock); -static DEFINE_SPINLOCK(dq_state_lock); -DEFINE_SPINLOCK(dq_data_lock); -EXPORT_SYMBOL(dq_data_lock); - -static char *quotatypes[] = INITQFNAMES; -static struct quota_format_type *quota_formats; /* List of registered formats */ -static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; - -/* SLAB cache for dquot structures */ -static struct kmem_cache *dquot_cachep; - -int register_quota_format(struct quota_format_type *fmt) -{ - spin_lock(&dq_list_lock); - fmt->qf_next = quota_formats; - quota_formats = fmt; - spin_unlock(&dq_list_lock); - return 0; -} -EXPORT_SYMBOL(register_quota_format); - -void unregister_quota_format(struct quota_format_type *fmt) -{ - struct quota_format_type **actqf; - - spin_lock(&dq_list_lock); - for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); - if (*actqf) - *actqf = (*actqf)->qf_next; - spin_unlock(&dq_list_lock); -} -EXPORT_SYMBOL(unregister_quota_format); - -static struct quota_format_type *find_quota_format(int id) -{ - struct quota_format_type *actqf; - - spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); - if (!actqf || !try_module_get(actqf->qf_owner)) { - int qm; - - spin_unlock(&dq_list_lock); - - for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); - if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) - return NULL; - - spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); - if (actqf && !try_module_get(actqf->qf_owner)) - actqf = NULL; - } - spin_unlock(&dq_list_lock); - return actqf; -} - -static void put_quota_format(struct quota_format_type *fmt) -{ - module_put(fmt->qf_owner); -} - -/* - * Dquot List Management: - * The quota code uses three lists for dquot management: the inuse_list, - * free_dquots, and dquot_hash[] array. A single dquot structure may be - * on all three lists, depending on its current state. - * - * All dquots are placed to the end of inuse_list when first created, and this - * list is used for invalidate operation, which must look at every dquot. - * - * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, - * and this list is searched whenever we need an available dquot. Dquots are - * removed from the list as soon as they are used again, and - * dqstats.free_dquots gives the number of dquots on the list. When - * dquot is invalidated it's completely released from memory. - * - * Dquots with a specific identity (device, type and id) are placed on - * one of the dquot_hash[] hash chains. The provides an efficient search - * mechanism to locate a specific dquot. 
- */ - -static LIST_HEAD(inuse_list); -static LIST_HEAD(free_dquots); -static unsigned int dq_hash_bits, dq_hash_mask; -static struct hlist_head *dquot_hash; - -struct dqstats dqstats; -EXPORT_SYMBOL(dqstats); - -static inline unsigned int -hashfn(const struct super_block *sb, unsigned int id, int type) -{ - unsigned long tmp; - - tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); - return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; -} - -/* - * Following list functions expect dq_list_lock to be held - */ -static inline void insert_dquot_hash(struct dquot *dquot) -{ - struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); - hlist_add_head(&dquot->dq_hash, head); -} - -static inline void remove_dquot_hash(struct dquot *dquot) -{ - hlist_del_init(&dquot->dq_hash); -} - -static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) -{ - struct hlist_node *node; - struct dquot *dquot; - - hlist_for_each (node, dquot_hash+hashent) { - dquot = hlist_entry(node, struct dquot, dq_hash); - if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) - return dquot; - } - return NODQUOT; -} - -/* Add a dquot to the tail of the free list */ -static inline void put_dquot_last(struct dquot *dquot) -{ - list_add_tail(&dquot->dq_free, &free_dquots); - dqstats.free_dquots++; -} - -static inline void remove_free_dquot(struct dquot *dquot) -{ - if (list_empty(&dquot->dq_free)) - return; - list_del_init(&dquot->dq_free); - dqstats.free_dquots--; -} - -static inline void put_inuse(struct dquot *dquot) -{ - /* We add to the back of inuse list so we don't have to restart - * when traversing this list and we block */ - list_add_tail(&dquot->dq_inuse, &inuse_list); - dqstats.allocated_dquots++; -} - -static inline void remove_inuse(struct dquot *dquot) -{ - dqstats.allocated_dquots--; - list_del(&dquot->dq_inuse); -} -/* - * End of list functions needing dq_list_lock - */ - -static void wait_on_dquot(struct dquot *dquot) -{ - mutex_lock(&dquot->dq_lock); - mutex_unlock(&dquot->dq_lock); -} - -static inline int dquot_dirty(struct dquot *dquot) -{ - return test_bit(DQ_MOD_B, &dquot->dq_flags); -} - -static inline int mark_dquot_dirty(struct dquot *dquot) -{ - return dquot->dq_sb->dq_op->mark_dirty(dquot); -} - -int dquot_mark_dquot_dirty(struct dquot *dquot) -{ - spin_lock(&dq_list_lock); - if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) - list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> - info[dquot->dq_type].dqi_dirty_list); - spin_unlock(&dq_list_lock); - return 0; -} -EXPORT_SYMBOL(dquot_mark_dquot_dirty); - -/* This function needs dq_list_lock */ -static inline int clear_dquot_dirty(struct dquot *dquot) -{ - if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) - return 0; - list_del_init(&dquot->dq_dirty); - return 1; -} - -void mark_info_dirty(struct super_block *sb, int type) -{ - set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); -} -EXPORT_SYMBOL(mark_info_dirty); - -/* - * Read dquot from disk and alloc space for it - */ - -int dquot_acquire(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dquot->dq_lock); - mutex_lock(&dqopt->dqio_mutex); - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) - ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); - if (ret < 0) - goto out_iolock; - set_bit(DQ_READ_B, &dquot->dq_flags); - /* Instantiate dquot if needed */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && 
!dquot->dq_off) { - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - /* Write the info if needed */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret < 0) - goto out_iolock; - if (ret2 < 0) { - ret = ret2; - goto out_iolock; - } - } - set_bit(DQ_ACTIVE_B, &dquot->dq_flags); -out_iolock: - mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&dquot->dq_lock); - return ret; -} -EXPORT_SYMBOL(dquot_acquire); - -/* - * Write dquot to disk - */ -int dquot_commit(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dqopt->dqio_mutex); - spin_lock(&dq_list_lock); - if (!clear_dquot_dirty(dquot)) { - spin_unlock(&dq_list_lock); - goto out_sem; - } - spin_unlock(&dq_list_lock); - /* Inactive dquot can be only if there was error during read/init - * => we have better not writing it */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret >= 0) - ret = ret2; - } -out_sem: - mutex_unlock(&dqopt->dqio_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_commit); - -/* - * Release dquot - */ -int dquot_release(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dquot->dq_lock); - /* Check whether we are not racing with some other dqget() */ - if (atomic_read(&dquot->dq_count) > 1) - goto out_dqlock; - mutex_lock(&dqopt->dqio_mutex); - if (dqopt->ops[dquot->dq_type]->release_dqblk) { - ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); - /* Write the info */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret >= 0) - ret = ret2; - } - clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - mutex_unlock(&dqopt->dqio_mutex); -out_dqlock: - mutex_unlock(&dquot->dq_lock); - return ret; -} -EXPORT_SYMBOL(dquot_release); - -void dquot_destroy(struct dquot *dquot) -{ - kmem_cache_free(dquot_cachep, dquot); -} -EXPORT_SYMBOL(dquot_destroy); - -static inline void do_destroy_dquot(struct dquot *dquot) -{ - dquot->dq_sb->dq_op->destroy_dquot(dquot); -} - -/* Invalidate all dquots on the list. Note that this function is called after - * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. There can still be some users of quotas due to inodes being - * just deleted or pruned by prune_icache() (those are not attached to any - * list) or parallel quotactl call. We have to wait for such users. - */ -static void invalidate_dquots(struct super_block *sb, int type) -{ - struct dquot *dquot, *tmp; - -restart: - spin_lock(&dq_list_lock); - list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { - if (dquot->dq_sb != sb) - continue; - if (dquot->dq_type != type) - continue; - /* Wait for dquot users */ - if (atomic_read(&dquot->dq_count)) { - DEFINE_WAIT(wait); - - atomic_inc(&dquot->dq_count); - prepare_to_wait(&dquot->dq_wait_unused, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock(&dq_list_lock); - /* Once dqput() wakes us up, we know it's time to free - * the dquot. - * IMPORTANT: we rely on the fact that there is always - * at most one process waiting for dquot to free. - * Otherwise dq_count would be > 1 and we would never - * wake up. 
- */ - if (atomic_read(&dquot->dq_count) > 1) - schedule(); - finish_wait(&dquot->dq_wait_unused, &wait); - dqput(dquot); - /* At this moment dquot() need not exist (it could be - * reclaimed by prune_dqcache(). Hence we must - * restart. */ - goto restart; - } - /* - * Quota now has no users and it has been written on last - * dqput() - */ - remove_dquot_hash(dquot); - remove_free_dquot(dquot); - remove_inuse(dquot); - do_destroy_dquot(dquot); - } - spin_unlock(&dq_list_lock); -} - -/* Call callback for every active dquot on given filesystem */ -int dquot_scan_active(struct super_block *sb, - int (*fn)(struct dquot *dquot, unsigned long priv), - unsigned long priv) -{ - struct dquot *dquot, *old_dquot = NULL; - int ret = 0; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - spin_lock(&dq_list_lock); - list_for_each_entry(dquot, &inuse_list, dq_inuse) { - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) - continue; - if (dquot->dq_sb != sb) - continue; - /* Now we have active dquot so we can just increase use count */ - atomic_inc(&dquot->dq_count); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - dqput(old_dquot); - old_dquot = dquot; - ret = fn(dquot, priv); - if (ret < 0) - goto out; - spin_lock(&dq_list_lock); - /* We are safe to continue now because our dquot could not - * be moved out of the inuse list while we hold the reference */ - } - spin_unlock(&dq_list_lock); -out: - dqput(old_dquot); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_scan_active); - -int vfs_quota_sync(struct super_block *sb, int type) -{ - struct list_head *dirty; - struct dquot *dquot; - struct quota_info *dqopt = sb_dqopt(sb); - int cnt; - - mutex_lock(&dqopt->dqonoff_mutex); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - spin_lock(&dq_list_lock); - dirty = &dqopt->info[cnt].dqi_dirty_list; - while (!list_empty(dirty)) { - dquot = list_first_entry(dirty, struct dquot, dq_dirty); - /* Dirty and inactive can be only bad dquot... 
*/ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - clear_dquot_dirty(dquot); - continue; - } - /* Now we have active dquot from which someone is - * holding reference so we can safely just increase - * use count */ - atomic_inc(&dquot->dq_count); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - sb->dq_op->write_dquot(dquot); - dqput(dquot); - spin_lock(&dq_list_lock); - } - spin_unlock(&dq_list_lock); - } - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) - && info_dirty(&dqopt->info[cnt])) - sb->dq_op->write_info(sb, cnt); - spin_lock(&dq_list_lock); - dqstats.syncs++; - spin_unlock(&dq_list_lock); - mutex_unlock(&dqopt->dqonoff_mutex); - - return 0; -} -EXPORT_SYMBOL(vfs_quota_sync); - -/* Free unused dquots from cache */ -static void prune_dqcache(int count) -{ - struct list_head *head; - struct dquot *dquot; - - head = free_dquots.prev; - while (head != &free_dquots && count) { - dquot = list_entry(head, struct dquot, dq_free); - remove_dquot_hash(dquot); - remove_free_dquot(dquot); - remove_inuse(dquot); - do_destroy_dquot(dquot); - count--; - head = free_dquots.prev; - } -} - -/* - * This is called from kswapd when we think we need some - * more memory - */ - -static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) -{ - if (nr) { - spin_lock(&dq_list_lock); - prune_dqcache(nr); - spin_unlock(&dq_list_lock); - } - return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; -} - -static struct shrinker dqcache_shrinker = { - .shrink = shrink_dqcache_memory, - .seeks = DEFAULT_SEEKS, -}; - -/* - * Put reference to dquot - * NOTE: If you change this function please check whether dqput_blocks() works right... - */ -void dqput(struct dquot *dquot) -{ - int ret; - - if (!dquot) - return; -#ifdef __DQUOT_PARANOIA - if (!atomic_read(&dquot->dq_count)) { - printk("VFS: dqput: trying to free free dquot\n"); - printk("VFS: device %s, dquot of %s %d\n", - dquot->dq_sb->s_id, - quotatypes[dquot->dq_type], - dquot->dq_id); - BUG(); - } -#endif - - spin_lock(&dq_list_lock); - dqstats.drops++; - spin_unlock(&dq_list_lock); -we_slept: - spin_lock(&dq_list_lock); - if (atomic_read(&dquot->dq_count) > 1) { - /* We have more than one user... nothing to do */ - atomic_dec(&dquot->dq_count); - /* Releasing dquot during quotaoff phase? */ - if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && - atomic_read(&dquot->dq_count) == 1) - wake_up(&dquot->dq_wait_unused); - spin_unlock(&dq_list_lock); - return; - } - /* Need to release dquot? */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { - spin_unlock(&dq_list_lock); - /* Commit dquot before releasing */ - ret = dquot->dq_sb->dq_op->write_dquot(dquot); - if (ret < 0) { - printk(KERN_ERR "VFS: cannot write quota structure on " - "device %s (error %d). 
Quota may get out of " - "sync!\n", dquot->dq_sb->s_id, ret); - /* - * We clear dirty bit anyway, so that we avoid - * infinite loop here - */ - spin_lock(&dq_list_lock); - clear_dquot_dirty(dquot); - spin_unlock(&dq_list_lock); - } - goto we_slept; - } - /* Clear flag in case dquot was inactive (something bad happened) */ - clear_dquot_dirty(dquot); - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - spin_unlock(&dq_list_lock); - dquot->dq_sb->dq_op->release_dquot(dquot); - goto we_slept; - } - atomic_dec(&dquot->dq_count); -#ifdef __DQUOT_PARANOIA - /* sanity check */ - BUG_ON(!list_empty(&dquot->dq_free)); -#endif - put_dquot_last(dquot); - spin_unlock(&dq_list_lock); -} -EXPORT_SYMBOL(dqput); - -struct dquot *dquot_alloc(struct super_block *sb, int type) -{ - return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); -} -EXPORT_SYMBOL(dquot_alloc); - -static struct dquot *get_empty_dquot(struct super_block *sb, int type) -{ - struct dquot *dquot; - - dquot = sb->dq_op->alloc_dquot(sb, type); - if(!dquot) - return NODQUOT; - - mutex_init(&dquot->dq_lock); - INIT_LIST_HEAD(&dquot->dq_free); - INIT_LIST_HEAD(&dquot->dq_inuse); - INIT_HLIST_NODE(&dquot->dq_hash); - INIT_LIST_HEAD(&dquot->dq_dirty); - init_waitqueue_head(&dquot->dq_wait_unused); - dquot->dq_sb = sb; - dquot->dq_type = type; - atomic_set(&dquot->dq_count, 1); - - return dquot; -} - -/* - * Get reference to dquot - * - * Locking is slightly tricky here. We are guarded from parallel quotaoff() - * destroying our dquot by: - * a) checking for quota flags under dq_list_lock and - * b) getting a reference to dquot before we release dq_list_lock - */ -struct dquot *dqget(struct super_block *sb, unsigned int id, int type) -{ - unsigned int hashent = hashfn(sb, id, type); - struct dquot *dquot = NODQUOT, *empty = NODQUOT; - - if (!sb_has_quota_active(sb, type)) - return NODQUOT; -we_slept: - spin_lock(&dq_list_lock); - spin_lock(&dq_state_lock); - if (!sb_has_quota_active(sb, type)) { - spin_unlock(&dq_state_lock); - spin_unlock(&dq_list_lock); - goto out; - } - spin_unlock(&dq_state_lock); - - if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { - if (empty == NODQUOT) { - spin_unlock(&dq_list_lock); - if ((empty = get_empty_dquot(sb, type)) == NODQUOT) - schedule(); /* Try to wait for a moment... */ - goto we_slept; - } - dquot = empty; - empty = NODQUOT; - dquot->dq_id = id; - /* all dquots go on the inuse_list */ - put_inuse(dquot); - /* hash it first so it can be found */ - insert_dquot_hash(dquot); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - } else { - if (!atomic_read(&dquot->dq_count)) - remove_free_dquot(dquot); - atomic_inc(&dquot->dq_count); - dqstats.cache_hits++; - dqstats.lookups++; - spin_unlock(&dq_list_lock); - } - /* Wait for dq_lock - after this we know that either dquot_release() is already - * finished or it will be canceled due to dq_count > 1 test */ - wait_on_dquot(dquot); - /* Read the dquot and instantiate it (everything done only if needed) */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { - dqput(dquot); - dquot = NODQUOT; - goto out; - } -#ifdef __DQUOT_PARANOIA - BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 
*/ -#endif -out: - if (empty) - do_destroy_dquot(empty); - - return dquot; -} -EXPORT_SYMBOL(dqget); - -static int dqinit_needed(struct inode *inode, int type) -{ - int cnt; - - if (IS_NOQUOTA(inode)) - return 0; - if (type != -1) - return inode->i_dquot[type] == NODQUOT; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] == NODQUOT) - return 1; - return 0; -} - -/* This routine is guarded by dqonoff_mutex mutex */ -static void add_dquot_ref(struct super_block *sb, int type) -{ - struct inode *inode, *old_inode = NULL; - - spin_lock(&inode_lock); - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { - if (!atomic_read(&inode->i_writecount)) - continue; - if (!dqinit_needed(inode, type)) - continue; - if (inode->i_state & (I_FREEING|I_WILL_FREE)) - continue; - - __iget(inode); - spin_unlock(&inode_lock); - - iput(old_inode); - sb->dq_op->initialize(inode, type); - /* We hold a reference to 'inode' so it couldn't have been - * removed from s_inodes list while we dropped the inode_lock. - * We cannot iput the inode now as we can be holding the last - * reference and we cannot iput it under inode_lock. So we - * keep the reference and iput it later. */ - old_inode = inode; - spin_lock(&inode_lock); - } - spin_unlock(&inode_lock); - iput(old_inode); -} - -/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ -static inline int dqput_blocks(struct dquot *dquot) -{ - if (atomic_read(&dquot->dq_count) <= 1) - return 1; - return 0; -} - -/* Remove references to dquots from inode - add dquot to list for freeing if needed */ -/* We can't race with anybody because we hold dqptr_sem for writing... */ -static int remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head) -{ - struct dquot *dquot = inode->i_dquot[type]; - - inode->i_dquot[type] = NODQUOT; - if (dquot != NODQUOT) { - if (dqput_blocks(dquot)) { -#ifdef __DQUOT_PARANOIA - if (atomic_read(&dquot->dq_count) != 1) - printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); -#endif - spin_lock(&dq_list_lock); - list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ - spin_unlock(&dq_list_lock); - return 1; - } - else - dqput(dquot); /* We have guaranteed we won't block */ - } - return 0; -} - -/* Free list of dquots - called from inode.c */ -/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ -static void put_dquot_list(struct list_head *tofree_head) -{ - struct list_head *act_head; - struct dquot *dquot; - - act_head = tofree_head->next; - /* So now we have dquots on the list... Just free them */ - while (act_head != tofree_head) { - dquot = list_entry(act_head, struct dquot, dq_free); - act_head = act_head->next; - list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... 
*/ - dqput(dquot); - } -} - -static void remove_dquot_ref(struct super_block *sb, int type, - struct list_head *tofree_head) -{ - struct inode *inode; - - spin_lock(&inode_lock); - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { - if (!IS_NOQUOTA(inode)) - remove_inode_dquot_ref(inode, type, tofree_head); - } - spin_unlock(&inode_lock); -} - -/* Gather all references from inodes and drop them */ -static void drop_dquot_ref(struct super_block *sb, int type) -{ - LIST_HEAD(tofree_head); - - if (sb->dq_op) { - down_write(&sb_dqopt(sb)->dqptr_sem); - remove_dquot_ref(sb, type, &tofree_head); - up_write(&sb_dqopt(sb)->dqptr_sem); - put_dquot_list(&tofree_head); - } -} - -static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curinodes += number; -} - -static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curspace += number; -} - -static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_rsvspace += number; -} - -/* - * Claim reserved quota space - */ -static void dquot_claim_reserved_space(struct dquot *dquot, - qsize_t number) -{ - WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); - dquot->dq_dqb.dqb_curspace += number; - dquot->dq_dqb.dqb_rsvspace -= number; -} - -static inline -void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_rsvspace -= number; -} - -static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) -{ - if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || - dquot->dq_dqb.dqb_curinodes >= number) - dquot->dq_dqb.dqb_curinodes -= number; - else - dquot->dq_dqb.dqb_curinodes = 0; - if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) - dquot->dq_dqb.dqb_itime = (time_t) 0; - clear_bit(DQ_INODES_B, &dquot->dq_flags); -} - -static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) -{ - if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || - dquot->dq_dqb.dqb_curspace >= number) - dquot->dq_dqb.dqb_curspace -= number; - else - dquot->dq_dqb.dqb_curspace = 0; - if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) - dquot->dq_dqb.dqb_btime = (time_t) 0; - clear_bit(DQ_BLKS_B, &dquot->dq_flags); -} - -static int warning_issued(struct dquot *dquot, const int warntype) -{ - int flag = (warntype == QUOTA_NL_BHARDWARN || - warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : - ((warntype == QUOTA_NL_IHARDWARN || - warntype == QUOTA_NL_ISOFTLONGWARN) ? 
DQ_INODES_B : 0); - - if (!flag) - return 0; - return test_and_set_bit(flag, &dquot->dq_flags); -} - -#ifdef CONFIG_PRINT_QUOTA_WARNING -static int flag_print_warnings = 1; - -static inline int need_print_warning(struct dquot *dquot) -{ - if (!flag_print_warnings) - return 0; - - switch (dquot->dq_type) { - case USRQUOTA: - return current_fsuid() == dquot->dq_id; - case GRPQUOTA: - return in_group_p(dquot->dq_id); - } - return 0; -} - -/* Print warning to user which exceeded quota */ -static void print_warning(struct dquot *dquot, const int warntype) -{ - char *msg = NULL; - struct tty_struct *tty; - - if (warntype == QUOTA_NL_IHARDBELOW || - warntype == QUOTA_NL_ISOFTBELOW || - warntype == QUOTA_NL_BHARDBELOW || - warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) - return; - - tty = get_current_tty(); - if (!tty) - return; - tty_write_message(tty, dquot->dq_sb->s_id); - if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) - tty_write_message(tty, ": warning, "); - else - tty_write_message(tty, ": write failed, "); - tty_write_message(tty, quotatypes[dquot->dq_type]); - switch (warntype) { - case QUOTA_NL_IHARDWARN: - msg = " file limit reached.\r\n"; - break; - case QUOTA_NL_ISOFTLONGWARN: - msg = " file quota exceeded too long.\r\n"; - break; - case QUOTA_NL_ISOFTWARN: - msg = " file quota exceeded.\r\n"; - break; - case QUOTA_NL_BHARDWARN: - msg = " block limit reached.\r\n"; - break; - case QUOTA_NL_BSOFTLONGWARN: - msg = " block quota exceeded too long.\r\n"; - break; - case QUOTA_NL_BSOFTWARN: - msg = " block quota exceeded.\r\n"; - break; - } - tty_write_message(tty, msg); - tty_kref_put(tty); -} -#endif - -#ifdef CONFIG_QUOTA_NETLINK_INTERFACE - -/* Netlink family structure for quota */ -static struct genl_family quota_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = 0, - .name = "VFS_DQUOT", - .version = 1, - .maxattr = QUOTA_NL_A_MAX, -}; - -/* Send warning to userspace about user which exceeded quota */ -static void send_warning(const struct dquot *dquot, const char warntype) -{ - static atomic_t seq; - struct sk_buff *skb; - void *msg_head; - int ret; - int msg_size = 4 * nla_total_size(sizeof(u32)) + - 2 * nla_total_size(sizeof(u64)); - - /* We have to allocate using GFP_NOFS as we are called from a - * filesystem performing write and thus further recursion into - * the fs to free some data could cause deadlocks. 
 */
-	skb = genlmsg_new(msg_size, GFP_NOFS);
-	if (!skb) {
-		printk(KERN_ERR
-		  "VFS: Not enough memory to send quota warning.\n");
-		return;
-	}
-	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
-	if (!msg_head) {
-		printk(KERN_ERR
-		  "VFS: Cannot store netlink header in quota warning.\n");
-		goto err_out;
-	}
-	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
-		MAJOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
-		MINOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
-	if (ret)
-		goto attr_err_out;
-	genlmsg_end(skb, msg_head);
-
-	ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
-	if (ret < 0 && ret != -ESRCH)
-		printk(KERN_ERR
-			"VFS: Failed to send notification message: %d\n", ret);
-	return;
-attr_err_out:
-	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
-	kfree_skb(skb);
-}
-#endif
-/*
- * Write warnings to the console and send warning messages over netlink.
- *
- * Note that this function can sleep.
- */
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
-{
-	int i;
-
-	for (i = 0; i < MAXQUOTAS; i++)
-		if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
-		    !warning_issued(dquots[i], warntype[i])) {
-#ifdef CONFIG_PRINT_QUOTA_WARNING
-			print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-			send_warning(dquots[i], warntype[i]);
-#endif
-		}
-}
-
-static inline char ignore_hardlimit(struct dquot *dquot)
-{
-	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
-
-	return capable(CAP_SYS_RESOURCE) &&
-	    (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
-}
-
-/* needs dq_data_lock */
-static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
-{
-	*warntype = QUOTA_NL_NOWARN;
-	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
-	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
-		return QUOTA_OK;
-
-	if (dquot->dq_dqb.dqb_ihardlimit &&
-	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
-	    !ignore_hardlimit(dquot)) {
-		*warntype = QUOTA_NL_IHARDWARN;
-		return NO_QUOTA;
-	}
-
-	if (dquot->dq_dqb.dqb_isoftlimit &&
-	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
-	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
-	    !ignore_hardlimit(dquot)) {
-		*warntype = QUOTA_NL_ISOFTLONGWARN;
-		return NO_QUOTA;
-	}
-
-	if (dquot->dq_dqb.dqb_isoftlimit &&
-	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
-	    dquot->dq_dqb.dqb_itime == 0) {
-		*warntype = QUOTA_NL_ISOFTWARN;
-		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
-	}
-
-	return QUOTA_OK;
-}
-
-/* needs dq_data_lock */
-static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
-{
-	qsize_t tspace;
-
-	*warntype = QUOTA_NL_NOWARN;
-	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
-	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
-		return QUOTA_OK;
-
-	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
-		+ space;
-
-	if 
(dquot->dq_dqb.dqb_bhardlimit && - tspace > dquot->dq_dqb.dqb_bhardlimit && - !ignore_hardlimit(dquot)) { - if (!prealloc) - *warntype = QUOTA_NL_BHARDWARN; - return NO_QUOTA; - } - - if (dquot->dq_dqb.dqb_bsoftlimit && - tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && - !ignore_hardlimit(dquot)) { - if (!prealloc) - *warntype = QUOTA_NL_BSOFTLONGWARN; - return NO_QUOTA; - } - - if (dquot->dq_dqb.dqb_bsoftlimit && - tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime == 0) { - if (!prealloc) { - *warntype = QUOTA_NL_BSOFTWARN; - dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; - } - else - /* - * We don't allow preallocation to exceed softlimit so exceeding will - * be always printed - */ - return NO_QUOTA; - } - - return QUOTA_OK; -} - -static int info_idq_free(struct dquot *dquot, qsize_t inodes) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || - !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) - return QUOTA_NL_NOWARN; - - if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) - return QUOTA_NL_ISOFTBELOW; - if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && - dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) - return QUOTA_NL_IHARDBELOW; - return QUOTA_NL_NOWARN; -} - -static int info_bdq_free(struct dquot *dquot, qsize_t space) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) - return QUOTA_NL_NOWARN; - - if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) - return QUOTA_NL_BSOFTBELOW; - if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && - dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) - return QUOTA_NL_BHARDBELOW; - return QUOTA_NL_NOWARN; -} -/* - * Initialize quota pointers in inode - * We do things in a bit complicated way but by that we avoid calling - * dqget() and thus filesystem callbacks under dqptr_sem. - */ -int dquot_initialize(struct inode *inode, int type) -{ - unsigned int id = 0; - int cnt, ret = 0; - struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; - struct super_block *sb = inode->i_sb; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return 0; - - /* First get references to structures we might need. */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - switch (cnt) { - case USRQUOTA: - id = inode->i_uid; - break; - case GRPQUOTA: - id = inode->i_gid; - break; - } - got[cnt] = dqget(sb, id, cnt); - } - - down_write(&sb_dqopt(sb)->dqptr_sem); - /* Having dqptr_sem we know NOQUOTA flags can't be altered... 
*/ - if (IS_NOQUOTA(inode)) - goto out_err; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - /* Avoid races with quotaoff() */ - if (!sb_has_quota_active(sb, cnt)) - continue; - if (inode->i_dquot[cnt] == NODQUOT) { - inode->i_dquot[cnt] = got[cnt]; - got[cnt] = NODQUOT; - } - } -out_err: - up_write(&sb_dqopt(sb)->dqptr_sem); - /* Drop unused references */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - dqput(got[cnt]); - return ret; -} -EXPORT_SYMBOL(dquot_initialize); - -/* - * Release all quotas referenced by inode - */ -int dquot_drop(struct inode *inode) -{ - int cnt; - struct dquot *put[MAXQUOTAS]; - - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - put[cnt] = inode->i_dquot[cnt]; - inode->i_dquot[cnt] = NODQUOT; - } - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - dqput(put[cnt]); - return 0; -} -EXPORT_SYMBOL(dquot_drop); - -/* Wrapper to remove references to quota structures from inode */ -void vfs_dq_drop(struct inode *inode) -{ - /* Here we can get arbitrary inode from clear_inode() so we have - * to be careful. OTOH we don't need locking as quota operations - * are allowed to change only at mount time */ - if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op - && inode->i_sb->dq_op->drop) { - int cnt; - /* Test before calling to rule out calls from proc and such - * where we are not allowed to block. Note that this is - * actually reliable test even without the lock - the caller - * must assure that nobody can come after the DQUOT_DROP and - * add quota pointers back anyway */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) - break; - if (cnt < MAXQUOTAS) - inode->i_sb->dq_op->drop(inode); - } -} -EXPORT_SYMBOL(vfs_dq_drop); - -/* - * Following four functions update i_blocks+i_bytes fields and - * quota information (together with appropriate checks) - * NOTE: We absolutely rely on the fact that caller dirties - * the inode (usually macros in quotaops.h care about this) and - * holds a handle for the current transaction so that dquot write and - * inode write go into the same transaction. 
- */ - -/* - * This operation can block, but only after everything is updated - */ -int __dquot_alloc_space(struct inode *inode, qsize_t number, - int warn, int reserve) -{ - int cnt, ret = QUOTA_OK; - char warntype[MAXQUOTAS]; - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - warntype[cnt] = QUOTA_NL_NOWARN; - - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) - == NO_QUOTA) { - ret = NO_QUOTA; - goto out_unlock; - } - } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (reserve) - dquot_resv_space(inode->i_dquot[cnt], number); - else - dquot_incr_space(inode->i_dquot[cnt], number); - } - if (!reserve) - inode_add_bytes(inode, number); -out_unlock: - spin_unlock(&dq_data_lock); - flush_warnings(inode->i_dquot, warntype); - return ret; -} - -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) -{ - int cnt, ret = QUOTA_OK; - - /* - * First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex - */ - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out_unlock; - } - - ret = __dquot_alloc_space(inode, number, warn, 0); - if (ret == NO_QUOTA) - goto out_unlock; - - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_alloc_space); - -int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) -{ - int ret = QUOTA_OK; - - if (IS_NOQUOTA(inode)) - goto out; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) - goto out_unlock; - - ret = __dquot_alloc_space(inode, number, warn, 1); -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_reserve_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_alloc_inode(const struct inode *inode, qsize_t number) -{ - int cnt, ret = NO_QUOTA; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - warntype[cnt] = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) - goto warn_put_all; - } - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - dquot_incr_inodes(inode->i_dquot[cnt], number); - } - ret = QUOTA_OK; -warn_put_all: - spin_unlock(&dq_data_lock); - if (ret == QUOTA_OK) - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return ret; -} -EXPORT_SYMBOL(dquot_alloc_inode); - -int 
dquot_claim_space(struct inode *inode, qsize_t number) -{ - int cnt; - int ret = QUOTA_OK; - - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - inode_add_bytes(inode, number); - goto out; - } - - spin_lock(&dq_data_lock); - /* Claim reserved quotas to allocated quotas */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) - dquot_claim_reserved_space(inode->i_dquot[cnt], - number); - } - /* Update inode bytes */ - inode_add_bytes(inode, number); - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_claim_space); - -/* - * Release reserved quota space - */ -void dquot_release_reserved_space(struct inode *inode, qsize_t number) -{ - int cnt; - - if (IS_NOQUOTA(inode)) - goto out; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) - goto out_unlock; - - spin_lock(&dq_data_lock); - /* Release reserved dquots */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) - dquot_free_reserved_space(inode->i_dquot[cnt], number); - } - spin_unlock(&dq_data_lock); - -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return; -} -EXPORT_SYMBOL(dquot_release_reserved_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_free_space(struct inode *inode, qsize_t number) -{ - unsigned int cnt; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) { -out_sub: - inode_sub_bytes(inode, number); - return QUOTA_OK; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto out_sub; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); - dquot_decr_space(inode->i_dquot[cnt], number); - } - inode_sub_bytes(inode, number); - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; -} -EXPORT_SYMBOL(dquot_free_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_free_inode(const struct inode *inode, qsize_t number) -{ - unsigned int cnt; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); - 
dquot_decr_inodes(inode->i_dquot[cnt], number); - } - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; -} -EXPORT_SYMBOL(dquot_free_inode); - -/* - * call back function, get reserved quota space from underlying fs - */ -qsize_t dquot_get_reserved_space(struct inode *inode) -{ - qsize_t reserved_space = 0; - - if (sb_any_quota_active(inode->i_sb) && - inode->i_sb->dq_op->get_reserved_space) - reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); - return reserved_space; -} - -/* - * Transfer the number of inode and blocks from one diskquota to an other. - * - * This operation can block, but only after everything is updated - * A transaction must be started when entering this function. - */ -int dquot_transfer(struct inode *inode, struct iattr *iattr) -{ - qsize_t space, cur_space; - qsize_t rsv_space = 0; - struct dquot *transfer_from[MAXQUOTAS]; - struct dquot *transfer_to[MAXQUOTAS]; - int cnt, ret = QUOTA_OK; - int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, - chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; - char warntype_to[MAXQUOTAS]; - char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - /* Initialize the arrays */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - transfer_from[cnt] = NODQUOT; - transfer_to[cnt] = NODQUOT; - warntype_to[cnt] = QUOTA_NL_NOWARN; - switch (cnt) { - case USRQUOTA: - if (!chuid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); - break; - case GRPQUOTA: - if (!chgid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); - break; - } - } - - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto put_all; - } - spin_lock(&dq_data_lock); - cur_space = inode_get_bytes(inode); - rsv_space = dquot_get_reserved_space(inode); - space = cur_space + rsv_space; - /* Build the transfer_from list and check the limits */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_to[cnt] == NODQUOT) - continue; - transfer_from[cnt] = inode->i_dquot[cnt]; - if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == - NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, - warntype_to + cnt) == NO_QUOTA) - goto over_quota; - } - - /* - * Finally perform the needed transfer from transfer_from to transfer_to - */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - /* - * Skip changes for same uid or gid or for turned off quota-type. 
- */ - if (transfer_to[cnt] == NODQUOT) - continue; - - /* Due to IO error we might not have transfer_from[] structure */ - if (transfer_from[cnt]) { - warntype_from_inodes[cnt] = - info_idq_free(transfer_from[cnt], 1); - warntype_from_space[cnt] = - info_bdq_free(transfer_from[cnt], space); - dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_space(transfer_from[cnt], cur_space); - dquot_free_reserved_space(transfer_from[cnt], - rsv_space); - } - - dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_space(transfer_to[cnt], cur_space); - dquot_resv_space(transfer_to[cnt], rsv_space); - - inode->i_dquot[cnt] = transfer_to[cnt]; - } - spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_from[cnt]) - mark_dquot_dirty(transfer_from[cnt]); - if (transfer_to[cnt]) { - mark_dquot_dirty(transfer_to[cnt]); - /* The reference we got is transferred to the inode */ - transfer_to[cnt] = NODQUOT; - } - } -warn_put_all: - flush_warnings(transfer_to, warntype_to); - flush_warnings(transfer_from, warntype_from_inodes); - flush_warnings(transfer_from, warntype_from_space); -put_all: - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - dqput(transfer_from[cnt]); - dqput(transfer_to[cnt]); - } - return ret; -over_quota: - spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Clear dquot pointers we don't want to dqput() */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - transfer_from[cnt] = NODQUOT; - ret = NO_QUOTA; - goto warn_put_all; -} -EXPORT_SYMBOL(dquot_transfer); - -/* Wrapper for transferring ownership of an inode */ -int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) -{ - if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { - vfs_dq_init(inode); - if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) - return 1; - } - return 0; -} -EXPORT_SYMBOL(vfs_dq_transfer); - -/* - * Write info of quota file to disk - */ -int dquot_commit_info(struct super_block *sb, int type) -{ - int ret; - struct quota_info *dqopt = sb_dqopt(sb); - - mutex_lock(&dqopt->dqio_mutex); - ret = dqopt->ops[type]->write_file_info(sb, type); - mutex_unlock(&dqopt->dqio_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_commit_info); - -/* - * Definitions of diskquota operations. - */ -struct dquot_operations dquot_operations = { - .initialize = dquot_initialize, - .drop = dquot_drop, - .alloc_space = dquot_alloc_space, - .alloc_inode = dquot_alloc_inode, - .free_space = dquot_free_space, - .free_inode = dquot_free_inode, - .transfer = dquot_transfer, - .write_dquot = dquot_commit, - .acquire_dquot = dquot_acquire, - .release_dquot = dquot_release, - .mark_dirty = dquot_mark_dquot_dirty, - .write_info = dquot_commit_info, - .alloc_dquot = dquot_alloc, - .destroy_dquot = dquot_destroy, -}; - -/* - * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) - */ -int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) -{ - int cnt, ret = 0; - struct quota_info *dqopt = sb_dqopt(sb); - struct inode *toputinode[MAXQUOTAS]; - - /* Cannot turn off usage accounting without turning off limits, or - * suspend quotas and simultaneously turn quotas off. 
*/ - if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) - || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | - DQUOT_USAGE_ENABLED))) - return -EINVAL; - - /* We need to serialize quota_off() for device */ - mutex_lock(&dqopt->dqonoff_mutex); - - /* - * Skip everything if there's nothing to do. We have to do this because - * sometimes we are called when fill_super() failed and calling - * sync_fs() in such cases does no good. - */ - if (!sb_any_quota_loaded(sb)) { - mutex_unlock(&dqopt->dqonoff_mutex); - return 0; - } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - toputinode[cnt] = NULL; - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_loaded(sb, cnt)) - continue; - - if (flags & DQUOT_SUSPENDED) { - spin_lock(&dq_state_lock); - dqopt->flags |= - dquot_state_flag(DQUOT_SUSPENDED, cnt); - spin_unlock(&dq_state_lock); - } else { - spin_lock(&dq_state_lock); - dqopt->flags &= ~dquot_state_flag(flags, cnt); - /* Turning off suspended quotas? */ - if (!sb_has_quota_loaded(sb, cnt) && - sb_has_quota_suspended(sb, cnt)) { - dqopt->flags &= ~dquot_state_flag( - DQUOT_SUSPENDED, cnt); - spin_unlock(&dq_state_lock); - iput(dqopt->files[cnt]); - dqopt->files[cnt] = NULL; - continue; - } - spin_unlock(&dq_state_lock); - } - - /* We still have to keep quota loaded? */ - if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) - continue; - - /* Note: these are blocking operations */ - drop_dquot_ref(sb, cnt); - invalidate_dquots(sb, cnt); - /* - * Now all dquots should be invalidated, all writes done so we should be only - * users of the info. No locks needed. - */ - if (info_dirty(&dqopt->info[cnt])) - sb->dq_op->write_info(sb, cnt); - if (dqopt->ops[cnt]->free_file_info) - dqopt->ops[cnt]->free_file_info(sb, cnt); - put_quota_format(dqopt->info[cnt].dqi_format); - - toputinode[cnt] = dqopt->files[cnt]; - if (!sb_has_quota_loaded(sb, cnt)) - dqopt->files[cnt] = NULL; - dqopt->info[cnt].dqi_flags = 0; - dqopt->info[cnt].dqi_igrace = 0; - dqopt->info[cnt].dqi_bgrace = 0; - dqopt->ops[cnt] = NULL; - } - mutex_unlock(&dqopt->dqonoff_mutex); - - /* Skip syncing and setting flags if quota files are hidden */ - if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) - goto put_inodes; - - /* Sync the superblock so that buffers with quota data are written to - * disk (and so userspace sees correct data afterwards). */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - /* Now the quota files are just ordinary files and we can set the - * inode flags back. Moreover we discard the pagecache so that - * userspace sees the writes we did bypassing the pagecache. We - * must also discard the blockdev buffers so that we see the - * changes done by userspace on the next quotaon() */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (toputinode[cnt]) { - mutex_lock(&dqopt->dqonoff_mutex); - /* If quota was reenabled in the meantime, we have - * nothing to do */ - if (!sb_has_quota_loaded(sb, cnt)) { - mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); - toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | - S_NOATIME | S_NOQUOTA); - truncate_inode_pages(&toputinode[cnt]->i_data, 0); - mutex_unlock(&toputinode[cnt]->i_mutex); - mark_inode_dirty(toputinode[cnt]); - } - mutex_unlock(&dqopt->dqonoff_mutex); - } - if (sb->s_bdev) - invalidate_bdev(sb->s_bdev); -put_inodes: - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (toputinode[cnt]) { - /* On remount RO, we keep the inode pointer so that we - * can reenable quota on the subsequent remount RW. 
We - * have to check 'flags' variable and not use sb_has_ - * function because another quotaon / quotaoff could - * change global state before we got here. We refuse - * to suspend quotas when there is pending delete on - * the quota file... */ - if (!(flags & DQUOT_SUSPENDED)) - iput(toputinode[cnt]); - else if (!toputinode[cnt]->i_nlink) - ret = -EBUSY; - } - return ret; -} -EXPORT_SYMBOL(vfs_quota_disable); - -int vfs_quota_off(struct super_block *sb, int type, int remount) -{ - return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : - (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); -} -EXPORT_SYMBOL(vfs_quota_off); -/* - * Turn quotas on on a device - */ - -/* - * Helper function to turn quotas on when we already have the inode of - * quota file and no quota information is loaded. - */ -static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, - unsigned int flags) -{ - struct quota_format_type *fmt = find_quota_format(format_id); - struct super_block *sb = inode->i_sb; - struct quota_info *dqopt = sb_dqopt(sb); - int error; - int oldflags = -1; - - if (!fmt) - return -ESRCH; - if (!S_ISREG(inode->i_mode)) { - error = -EACCES; - goto out_fmt; - } - if (IS_RDONLY(inode)) { - error = -EROFS; - goto out_fmt; - } - if (!sb->s_op->quota_write || !sb->s_op->quota_read) { - error = -EINVAL; - goto out_fmt; - } - /* Usage always has to be set... */ - if (!(flags & DQUOT_USAGE_ENABLED)) { - error = -EINVAL; - goto out_fmt; - } - - if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { - /* As we bypass the pagecache we must now flush the inode so - * that we see all the changes from userspace... */ - write_inode_now(inode, 1); - /* And now flush the block cache so that kernel sees the - * changes */ - invalidate_bdev(sb->s_bdev); - } - mutex_lock(&inode->i_mutex); - mutex_lock(&dqopt->dqonoff_mutex); - if (sb_has_quota_loaded(sb, type)) { - error = -EBUSY; - goto out_lock; - } - - if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { - /* We don't want quota and atime on quota files (deadlocks - * possible) Also nobody should write to the file - we use - * special IO operations which ignore the immutable bit. 
*/ - down_write(&dqopt->dqptr_sem); - oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); - inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; - up_write(&dqopt->dqptr_sem); - sb->dq_op->drop(inode); - } - - error = -EIO; - dqopt->files[type] = igrab(inode); - if (!dqopt->files[type]) - goto out_lock; - error = -EINVAL; - if (!fmt->qf_ops->check_quota_file(sb, type)) - goto out_file_init; - - dqopt->ops[type] = fmt->qf_ops; - dqopt->info[type].dqi_format = fmt; - dqopt->info[type].dqi_fmt_id = format_id; - INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - mutex_lock(&dqopt->dqio_mutex); - if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - mutex_unlock(&dqopt->dqio_mutex); - goto out_file_init; - } - mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&inode->i_mutex); - spin_lock(&dq_state_lock); - dqopt->flags |= dquot_state_flag(flags, type); - spin_unlock(&dq_state_lock); - - add_dquot_ref(sb, type); - mutex_unlock(&dqopt->dqonoff_mutex); - - return 0; - -out_file_init: - dqopt->files[type] = NULL; - iput(inode); -out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); - if (oldflags != -1) { - down_write(&dqopt->dqptr_sem); - /* Set the flags back (in the case of accidental quotaon() - * on a wrong file we don't want to mess up the flags) */ - inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); - inode->i_flags |= oldflags; - up_write(&dqopt->dqptr_sem); - } - mutex_unlock(&inode->i_mutex); -out_fmt: - put_quota_format(fmt); - - return error; -} - -/* Reenable quotas on remount RW */ -static int vfs_quota_on_remount(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct inode *inode; - int ret; - unsigned int flags; - - mutex_lock(&dqopt->dqonoff_mutex); - if (!sb_has_quota_suspended(sb, type)) { - mutex_unlock(&dqopt->dqonoff_mutex); - return 0; - } - inode = dqopt->files[type]; - dqopt->files[type] = NULL; - spin_lock(&dq_state_lock); - flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED, type); - dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); - spin_unlock(&dq_state_lock); - mutex_unlock(&dqopt->dqonoff_mutex); - - flags = dquot_generic_flag(flags, type); - ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, - flags); - iput(inode); - - return ret; -} - -int vfs_quota_on_path(struct super_block *sb, int type, int format_id, - struct path *path) -{ - int error = security_quota_on(path->dentry); - if (error) - return error; - /* Quota file not on the same filesystem? */ - if (path->mnt->mnt_sb != sb) - error = -EXDEV; - else - error = vfs_load_quota_inode(path->dentry->d_inode, type, - format_id, DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED); - return error; -} -EXPORT_SYMBOL(vfs_quota_on_path); - -int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, - int remount) -{ - struct path path; - int error; - - if (remount) - return vfs_quota_on_remount(sb, type); - - error = kern_path(name, LOOKUP_FOLLOW, &path); - if (!error) { - error = vfs_quota_on_path(sb, type, format_id, &path); - path_put(&path); - } - return error; -} -EXPORT_SYMBOL(vfs_quota_on); - -/* - * More powerful function for turning on quotas allowing setting - * of individual quota flags - */ -int vfs_quota_enable(struct inode *inode, int type, int format_id, - unsigned int flags) -{ - int ret = 0; - struct super_block *sb = inode->i_sb; - struct quota_info *dqopt = sb_dqopt(sb); - - /* Just unsuspend quotas? 
*/ - if (flags & DQUOT_SUSPENDED) - return vfs_quota_on_remount(sb, type); - if (!flags) - return 0; - /* Just updating flags needed? */ - if (sb_has_quota_loaded(sb, type)) { - mutex_lock(&dqopt->dqonoff_mutex); - /* Now do a reliable test... */ - if (!sb_has_quota_loaded(sb, type)) { - mutex_unlock(&dqopt->dqonoff_mutex); - goto load_quota; - } - if (flags & DQUOT_USAGE_ENABLED && - sb_has_quota_usage_enabled(sb, type)) { - ret = -EBUSY; - goto out_lock; - } - if (flags & DQUOT_LIMITS_ENABLED && - sb_has_quota_limits_enabled(sb, type)) { - ret = -EBUSY; - goto out_lock; - } - spin_lock(&dq_state_lock); - sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); - spin_unlock(&dq_state_lock); -out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); - return ret; - } - -load_quota: - return vfs_load_quota_inode(inode, type, format_id, flags); -} -EXPORT_SYMBOL(vfs_quota_enable); - -/* - * This function is used when filesystem needs to initialize quotas - * during mount time. - */ -int vfs_quota_on_mount(struct super_block *sb, char *qf_name, - int format_id, int type) -{ - struct dentry *dentry; - int error; - - dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (!dentry->d_inode) { - error = -ENOENT; - goto out; - } - - error = security_quota_on(dentry); - if (!error) - error = vfs_load_quota_inode(dentry->d_inode, type, format_id, - DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); - -out: - dput(dentry); - return error; -} -EXPORT_SYMBOL(vfs_quota_on_mount); - -/* Wrapper to turn on quotas when remounting rw */ -int vfs_dq_quota_on_remount(struct super_block *sb) -{ - int cnt; - int ret = 0, err; - - if (!sb->s_qcop || !sb->s_qcop->quota_on) - return -ENOSYS; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); - if (err < 0 && !ret) - ret = err; - } - return ret; -} -EXPORT_SYMBOL(vfs_dq_quota_on_remount); - -static inline qsize_t qbtos(qsize_t blocks) -{ - return blocks << QIF_DQBLKSIZE_BITS; -} - -static inline qsize_t stoqb(qsize_t space) -{ - return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; -} - -/* Generic routine for getting common part of quota structure */ -static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) -{ - struct mem_dqblk *dm = &dquot->dq_dqb; - - spin_lock(&dq_data_lock); - di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); - di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); - di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; - di->dqb_ihardlimit = dm->dqb_ihardlimit; - di->dqb_isoftlimit = dm->dqb_isoftlimit; - di->dqb_curinodes = dm->dqb_curinodes; - di->dqb_btime = dm->dqb_btime; - di->dqb_itime = dm->dqb_itime; - di->dqb_valid = QIF_ALL; - spin_unlock(&dq_data_lock); -} - -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) -{ - struct dquot *dquot; - - dquot = dqget(sb, id, type); - if (dquot == NODQUOT) - return -ESRCH; - do_get_dqblk(dquot, di); - dqput(dquot); - - return 0; -} -EXPORT_SYMBOL(vfs_get_dqblk); - -/* Generic routine for setting common part of quota structure */ -static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) -{ - struct mem_dqblk *dm = &dquot->dq_dqb; - int check_blim = 0, check_ilim = 0; - struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; - - if ((di->dqb_valid & QIF_BLIMITS && - (di->dqb_bhardlimit > dqi->dqi_maxblimit || - di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || - (di->dqb_valid & QIF_ILIMITS && - (di->dqb_ihardlimit > dqi->dqi_maxilimit || - 
di->dqb_isoftlimit > dqi->dqi_maxilimit))) - return -ERANGE; - - spin_lock(&dq_data_lock); - if (di->dqb_valid & QIF_SPACE) { - dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_BLIMITS) { - dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); - dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_INODES) { - dm->dqb_curinodes = di->dqb_curinodes; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_ILIMITS) { - dm->dqb_isoftlimit = di->dqb_isoftlimit; - dm->dqb_ihardlimit = di->dqb_ihardlimit; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_BTIME) { - dm->dqb_btime = di->dqb_btime; - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_ITIME) { - dm->dqb_itime = di->dqb_itime; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); - } - - if (check_blim) { - if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { - dm->dqb_btime = 0; - clear_bit(DQ_BLKS_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ - dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; - } - if (check_ilim) { - if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { - dm->dqb_itime = 0; - clear_bit(DQ_INODES_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ - dm->dqb_itime = get_seconds() + dqi->dqi_igrace; - } - if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) - clear_bit(DQ_FAKE_B, &dquot->dq_flags); - else - set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); - mark_dquot_dirty(dquot); - - return 0; -} - -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) -{ - struct dquot *dquot; - int rc; - - dquot = dqget(sb, id, type); - if (!dquot) { - rc = -ESRCH; - goto out; - } - rc = do_set_dqblk(dquot, di); - dqput(dquot); -out: - return rc; -} -EXPORT_SYMBOL(vfs_set_dqblk); - -/* Generic routine for getting common part of quota file information */ -int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) -{ - struct mem_dqinfo *mi; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_active(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return -ESRCH; - } - mi = sb_dqopt(sb)->info + type; - spin_lock(&dq_data_lock); - ii->dqi_bgrace = mi->dqi_bgrace; - ii->dqi_igrace = mi->dqi_igrace; - ii->dqi_flags = mi->dqi_flags & DQF_MASK; - ii->dqi_valid = IIF_ALL; - spin_unlock(&dq_data_lock); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return 0; -} -EXPORT_SYMBOL(vfs_get_dqinfo); - -/* Generic routine for setting common part of quota file information */ -int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) -{ - struct mem_dqinfo *mi; - int err = 0; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_active(sb, type)) { - err = -ESRCH; - goto out; - } - mi = sb_dqopt(sb)->info + type; - spin_lock(&dq_data_lock); - if (ii->dqi_valid & IIF_BGRACE) - mi->dqi_bgrace = ii->dqi_bgrace; - if (ii->dqi_valid & IIF_IGRACE) - mi->dqi_igrace = ii->dqi_igrace; - if (ii->dqi_valid & IIF_FLAGS) - mi->dqi_flags = 
(mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); - spin_unlock(&dq_data_lock); - mark_info_dirty(sb, type); - /* Force write to disk */ - sb->dq_op->write_info(sb, type); -out: - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return err; -} -EXPORT_SYMBOL(vfs_set_dqinfo); - -struct quotactl_ops vfs_quotactl_ops = { - .quota_on = vfs_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk -}; - -static ctl_table fs_dqstats_table[] = { - { - .ctl_name = FS_DQ_LOOKUPS, - .procname = "lookups", - .data = &dqstats.lookups, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_DROPS, - .procname = "drops", - .data = &dqstats.drops, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_READS, - .procname = "reads", - .data = &dqstats.reads, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_WRITES, - .procname = "writes", - .data = &dqstats.writes, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_CACHE_HITS, - .procname = "cache_hits", - .data = &dqstats.cache_hits, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_ALLOCATED, - .procname = "allocated_dquots", - .data = &dqstats.allocated_dquots, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_FREE, - .procname = "free_dquots", - .data = &dqstats.free_dquots, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_SYNCS, - .procname = "syncs", - .data = &dqstats.syncs, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, -#ifdef CONFIG_PRINT_QUOTA_WARNING - { - .ctl_name = FS_DQ_WARNINGS, - .procname = "warnings", - .data = &flag_print_warnings, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, -#endif - { .ctl_name = 0 }, -}; - -static ctl_table fs_table[] = { - { - .ctl_name = FS_DQSTATS, - .procname = "quota", - .mode = 0555, - .child = fs_dqstats_table, - }, - { .ctl_name = 0 }, -}; - -static ctl_table sys_table[] = { - { - .ctl_name = CTL_FS, - .procname = "fs", - .mode = 0555, - .child = fs_table, - }, - { .ctl_name = 0 }, -}; - -static int __init dquot_init(void) -{ - int i; - unsigned long nr_hash, order; - - printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); - - register_sysctl_table(sys_table); - - dquot_cachep = kmem_cache_create("dquot", - sizeof(struct dquot), sizeof(unsigned long) * 4, - (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), - NULL); - - order = 0; - dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); - if (!dquot_hash) - panic("Cannot create dquot hash table"); - - /* Find power-of-two hlist_heads which can fit into allocation */ - nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); - dq_hash_bits = 0; - do { - dq_hash_bits++; - } while (nr_hash >> dq_hash_bits); - dq_hash_bits--; - - nr_hash = 1UL << dq_hash_bits; - dq_hash_mask = nr_hash - 1; - for (i = 0; i < nr_hash; i++) - INIT_HLIST_HEAD(dquot_hash + i); - - printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", - nr_hash, order, (PAGE_SIZE << order)); - - register_shrinker(&dqcache_shrinker); - -#ifdef 
CONFIG_QUOTA_NETLINK_INTERFACE - if (genl_register_family("a_genl_family) != 0) - printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); -#endif - - return 0; -} -module_init(dquot_init); diff --git a/fs/quota.c b/fs/quota.c deleted file mode 100644 index d76ada914f9..00000000000 --- a/fs/quota.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Quota code necessary even when VFS quota support is not compiled - * into the kernel. The interesting stuff is over in dquot.c, here - * we have symbols for initial quotactl(2) handling, the sysctl(2) - * variables, etc - things needed even when quota support disabled. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Check validity of generic quotactl commands */ -static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - if (type >= MAXQUOTAS) - return -EINVAL; - if (!sb && cmd != Q_SYNC) - return -ENODEV; - /* Is operation supported? */ - if (sb && !sb->s_qcop) - return -ENOSYS; - - switch (cmd) { - case Q_GETFMT: - break; - case Q_QUOTAON: - if (!sb->s_qcop->quota_on) - return -ENOSYS; - break; - case Q_QUOTAOFF: - if (!sb->s_qcop->quota_off) - return -ENOSYS; - break; - case Q_SETINFO: - if (!sb->s_qcop->set_info) - return -ENOSYS; - break; - case Q_GETINFO: - if (!sb->s_qcop->get_info) - return -ENOSYS; - break; - case Q_SETQUOTA: - if (!sb->s_qcop->set_dqblk) - return -ENOSYS; - break; - case Q_GETQUOTA: - if (!sb->s_qcop->get_dqblk) - return -ENOSYS; - break; - case Q_SYNC: - if (sb && !sb->s_qcop->quota_sync) - return -ENOSYS; - break; - default: - return -EINVAL; - } - - /* Is quota turned on for commands which need it? */ - switch (cmd) { - case Q_GETFMT: - case Q_GETINFO: - case Q_SETINFO: - case Q_SETQUOTA: - case Q_GETQUOTA: - /* This is just informative test so we are satisfied without a lock */ - if (!sb_has_quota_active(sb, type)) - return -ESRCH; - } - - /* Check privileges */ - if (cmd == Q_GETQUOTA) { - if (((type == USRQUOTA && current_euid() != id) || - (type == GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - } - else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; -} - -/* Check validity of XFS Quota Manager commands */ -static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - if (type >= XQM_MAXQUOTAS) - return -EINVAL; - if (!sb) - return -ENODEV; - if (!sb->s_qcop) - return -ENOSYS; - - switch (cmd) { - case Q_XQUOTAON: - case Q_XQUOTAOFF: - case Q_XQUOTARM: - if (!sb->s_qcop->set_xstate) - return -ENOSYS; - break; - case Q_XGETQSTAT: - if (!sb->s_qcop->get_xstate) - return -ENOSYS; - break; - case Q_XSETQLIM: - if (!sb->s_qcop->set_xquota) - return -ENOSYS; - break; - case Q_XGETQUOTA: - if (!sb->s_qcop->get_xquota) - return -ENOSYS; - break; - case Q_XQUOTASYNC: - if (!sb->s_qcop->quota_sync) - return -ENOSYS; - break; - default: - return -EINVAL; - } - - /* Check privileges */ - if (cmd == Q_XGETQUOTA) { - if (((type == XQM_USRQUOTA && current_euid() != id) || - (type == XQM_GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - } - - return 0; -} - -static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - int error; - - if (XQM_COMMAND(cmd)) - error = xqm_quotactl_valid(sb, type, cmd, id); - else - 
error = generic_quotactl_valid(sb, type, cmd, id); - if (!error) - error = security_quotactl(cmd, type, id, sb); - return error; -} - -static void quota_sync_sb(struct super_block *sb, int type) -{ - int cnt; - - sb->s_qcop->quota_sync(sb, type); - - if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) - return; - /* This is not very clever (and fast) but currently I don't know about - * any other simple way of getting quota data to disk and we must get - * them there for userspace to be visible... */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - - /* - * Now when everything is written we can discard the pagecache so - * that userspace sees the changes. - */ - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); - truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); - mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); - } - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); -} - -void sync_dquots(struct super_block *sb, int type) -{ - int cnt; - - if (sb) { - if (sb->s_qcop->quota_sync) - quota_sync_sb(sb, type); - return; - } - - spin_lock(&sb_lock); -restart: - list_for_each_entry(sb, &super_blocks, s_list) { - /* This test just improves performance so it needn't be reliable... */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && type != cnt) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && - list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) - continue; - break; - } - if (cnt == MAXQUOTAS) - continue; - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); - if (sb->s_root && sb->s_qcop->quota_sync) - quota_sync_sb(sb, type); - up_read(&sb->s_umount); - spin_lock(&sb_lock); - if (__put_super_and_need_restart(sb)) - goto restart; - } - spin_unlock(&sb_lock); -} - -/* Copy parameters and call proper function */ -static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) -{ - int ret; - - switch (cmd) { - case Q_QUOTAON: { - char *pathname; - - if (IS_ERR(pathname = getname(addr))) - return PTR_ERR(pathname); - ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); - putname(pathname); - return ret; - } - case Q_QUOTAOFF: - return sb->s_qcop->quota_off(sb, type, 0); - - case Q_GETFMT: { - __u32 fmt; - - down_read(&sb_dqopt(sb)->dqptr_sem); - if (!sb_has_quota_active(sb, type)) { - up_read(&sb_dqopt(sb)->dqptr_sem); - return -ESRCH; - } - fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; - up_read(&sb_dqopt(sb)->dqptr_sem); - if (copy_to_user(addr, &fmt, sizeof(fmt))) - return -EFAULT; - return 0; - } - case Q_GETINFO: { - struct if_dqinfo info; - - if ((ret = sb->s_qcop->get_info(sb, type, &info))) - return ret; - if (copy_to_user(addr, &info, sizeof(info))) - return -EFAULT; - return 0; - } - case Q_SETINFO: { - struct if_dqinfo info; - - if (copy_from_user(&info, addr, sizeof(info))) - return -EFAULT; - return sb->s_qcop->set_info(sb, type, &info); - } - case Q_GETQUOTA: { - struct if_dqblk idq; - - if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) - return ret; - if (copy_to_user(addr, &idq, sizeof(idq))) - return -EFAULT; - return 0; - } - case Q_SETQUOTA: { - struct if_dqblk idq; - - if (copy_from_user(&idq, addr, sizeof(idq))) - return -EFAULT; - return sb->s_qcop->set_dqblk(sb, type, id, &idq); - } - case Q_SYNC: - 
sync_dquots(sb, type); - return 0; - - case Q_XQUOTAON: - case Q_XQUOTAOFF: - case Q_XQUOTARM: { - __u32 flags; - - if (copy_from_user(&flags, addr, sizeof(flags))) - return -EFAULT; - return sb->s_qcop->set_xstate(sb, flags, cmd); - } - case Q_XGETQSTAT: { - struct fs_quota_stat fqs; - - if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) - return ret; - if (copy_to_user(addr, &fqs, sizeof(fqs))) - return -EFAULT; - return 0; - } - case Q_XSETQLIM: { - struct fs_disk_quota fdq; - - if (copy_from_user(&fdq, addr, sizeof(fdq))) - return -EFAULT; - return sb->s_qcop->set_xquota(sb, type, id, &fdq); - } - case Q_XGETQUOTA: { - struct fs_disk_quota fdq; - - if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) - return ret; - if (copy_to_user(addr, &fdq, sizeof(fdq))) - return -EFAULT; - return 0; - } - case Q_XQUOTASYNC: - return sb->s_qcop->quota_sync(sb, type); - /* We never reach here unless validity check is broken */ - default: - BUG(); - } - return 0; -} - -/* - * look up a superblock on which quota ops will be performed - * - use the name of a block device to find the superblock thereon - */ -static inline struct super_block *quotactl_block(const char __user *special) -{ -#ifdef CONFIG_BLOCK - struct block_device *bdev; - struct super_block *sb; - char *tmp = getname(special); - - if (IS_ERR(tmp)) - return ERR_CAST(tmp); - bdev = lookup_bdev(tmp); - putname(tmp); - if (IS_ERR(bdev)) - return ERR_CAST(bdev); - sb = get_super(bdev); - bdput(bdev); - if (!sb) - return ERR_PTR(-ENODEV); - - return sb; -#else - return ERR_PTR(-ENODEV); -#endif -} - -/* - * This is the system call interface. This communicates with - * the user-level programs. Currently this only supports diskquota - * calls. Maybe we need to add the process quotas etc. in the future, - * but we probably should use rlimits for that. - */ -SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, - qid_t, id, void __user *, addr) -{ - uint cmds, type; - struct super_block *sb = NULL; - int ret; - - cmds = cmd >> SUBCMDSHIFT; - type = cmd & SUBCMDMASK; - - if (cmds != Q_SYNC || special) { - sb = quotactl_block(special); - if (IS_ERR(sb)) - return PTR_ERR(sb); - } - - ret = check_quotactl_valid(sb, type, cmds, id); - if (ret >= 0) - ret = do_quotactl(sb, type, cmds, id, addr); - if (sb) - drop_super(sb); - - return ret; -} - -#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) -/* - * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) - * and is necessary due to alignment problems. 
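The alignment problem referred to here is concrete: __u64 is 4-byte aligned in the i386 ABI but 8-byte aligned on x86_64, so a structure made of 64-bit fields has a different size and padding depending on which side of the syscall built it. A minimal user-space sketch, where demo_dqblk is a hypothetical stand-in that only mirrors the member order of struct if_dqblk:

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

struct demo_dqblk {
        uint64_t bhardlimit, bsoftlimit, curspace;
        uint64_t ihardlimit, isoftlimit, curinodes;
        uint64_t btime, itime;
        uint32_t valid;
};

int main(void)
{
        /* Prints 68 when built with -m32 (u64 is 4-byte aligned there) and
         * 72 on x86_64 (8-byte alignment adds tail padding), which is why
         * the kernel needs compat_u64-based mirror structures. */
        printf("sizeof(struct demo_dqblk) = %zu\n", sizeof(struct demo_dqblk));
        return 0;
}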
- */ -struct compat_if_dqblk { - compat_u64 dqb_bhardlimit; - compat_u64 dqb_bsoftlimit; - compat_u64 dqb_curspace; - compat_u64 dqb_ihardlimit; - compat_u64 dqb_isoftlimit; - compat_u64 dqb_curinodes; - compat_u64 dqb_btime; - compat_u64 dqb_itime; - compat_uint_t dqb_valid; -}; - -/* XFS structures */ -struct compat_fs_qfilestat { - compat_u64 dqb_bhardlimit; - compat_u64 qfs_nblks; - compat_uint_t qfs_nextents; -}; - -struct compat_fs_quota_stat { - __s8 qs_version; - __u16 qs_flags; - __s8 qs_pad; - struct compat_fs_qfilestat qs_uquota; - struct compat_fs_qfilestat qs_gquota; - compat_uint_t qs_incoredqs; - compat_int_t qs_btimelimit; - compat_int_t qs_itimelimit; - compat_int_t qs_rtbtimelimit; - __u16 qs_bwarnlimit; - __u16 qs_iwarnlimit; -}; - -asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, - qid_t id, void __user *addr) -{ - unsigned int cmds; - struct if_dqblk __user *dqblk; - struct compat_if_dqblk __user *compat_dqblk; - struct fs_quota_stat __user *fsqstat; - struct compat_fs_quota_stat __user *compat_fsqstat; - compat_uint_t data; - u16 xdata; - long ret; - - cmds = cmd >> SUBCMDSHIFT; - - switch (cmds) { - case Q_GETQUOTA: - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); - compat_dqblk = addr; - ret = sys_quotactl(cmd, special, id, dqblk); - if (ret) - break; - if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || - get_user(data, &dqblk->dqb_valid) || - put_user(data, &compat_dqblk->dqb_valid)) - ret = -EFAULT; - break; - case Q_SETQUOTA: - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); - compat_dqblk = addr; - ret = -EFAULT; - if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || - get_user(data, &compat_dqblk->dqb_valid) || - put_user(data, &dqblk->dqb_valid)) - break; - ret = sys_quotactl(cmd, special, id, dqblk); - break; - case Q_XGETQSTAT: - fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); - compat_fsqstat = addr; - ret = sys_quotactl(cmd, special, id, fsqstat); - if (ret) - break; - ret = -EFAULT; - /* Copying qs_version, qs_flags, qs_pad */ - if (copy_in_user(compat_fsqstat, fsqstat, - offsetof(struct compat_fs_quota_stat, qs_uquota))) - break; - /* Copying qs_uquota */ - if (copy_in_user(&compat_fsqstat->qs_uquota, - &fsqstat->qs_uquota, - sizeof(compat_fsqstat->qs_uquota)) || - get_user(data, &fsqstat->qs_uquota.qfs_nextents) || - put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) - break; - /* Copying qs_gquota */ - if (copy_in_user(&compat_fsqstat->qs_gquota, - &fsqstat->qs_gquota, - sizeof(compat_fsqstat->qs_gquota)) || - get_user(data, &fsqstat->qs_gquota.qfs_nextents) || - put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) - break; - /* Copying the rest */ - if (copy_in_user(&compat_fsqstat->qs_incoredqs, - &fsqstat->qs_incoredqs, - sizeof(struct compat_fs_quota_stat) - - offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || - get_user(xdata, &fsqstat->qs_iwarnlimit) || - put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) - break; - ret = 0; - break; - default: - ret = sys_quotactl(cmd, special, id, addr); - } - return ret; -} -#endif diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig new file mode 100644 index 00000000000..d9750d86fde --- /dev/null +++ b/fs/quota/Kconfig @@ -0,0 +1,59 @@ +# +# Quota configuration +# + +config QUOTA + bool "Quota support" + help + If you say Y here, you will be able to set per user limits for disk + usage (also called disk quotas). Currently, it works for the + ext2, ext3, and reiserfs file system. 
ext3 also supports journalled + quotas for which you don't need to run quotacheck(8) after an unclean + shutdown. + For further details, read the Quota mini-HOWTO, available from + , or the documentation provided + with the quota tools. Probably the quota support is only useful for + multi user systems. If unsure, say N. + +config QUOTA_NETLINK_INTERFACE + bool "Report quota messages through netlink interface" + depends on QUOTA && NET + help + If you say Y here, quota warnings (about exceeding softlimit, reaching + hardlimit, etc.) will be reported through netlink interface. If unsure, + say Y. + +config PRINT_QUOTA_WARNING + bool "Print quota warnings to console (OBSOLETE)" + depends on QUOTA + default y + help + If you say Y here, quota warnings (about exceeding softlimit, reaching + hardlimit, etc.) will be printed to the process' controlling terminal. + Note that this behavior is currently deprecated and may go away in + future. Please use notification via netlink socket instead. + +# Generic support for tree structured quota files. Seleted when needed. +config QUOTA_TREE + tristate + +config QFMT_V1 + tristate "Old quota format support" + depends on QUOTA + help + This quota format was (is) used by kernels earlier than 2.4.22. If + you have quota working and you don't want to convert to new quota + format say Y here. + +config QFMT_V2 + tristate "Quota format v2 support" + depends on QUOTA + select QUOTA_TREE + help + This quota format allows using quotas with 32-bit UIDs/GIDs. If you + need this functionality say Y here. + +config QUOTACTL + bool + depends on XFS_QUOTA || QUOTA + default y diff --git a/fs/quota/Makefile b/fs/quota/Makefile new file mode 100644 index 00000000000..385a0831cc9 --- /dev/null +++ b/fs/quota/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Linux filesystems. +# +# 14 Sep 2000, Christoph Hellwig +# Rewritten to use lists instead of if-statements. +# + +obj-y := + +obj-$(CONFIG_QUOTA) += dquot.o +obj-$(CONFIG_QFMT_V1) += quota_v1.o +obj-$(CONFIG_QFMT_V2) += quota_v2.o +obj-$(CONFIG_QUOTA_TREE) += quota_tree.o +obj-$(CONFIG_QUOTACTL) += quota.o diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c new file mode 100644 index 00000000000..28aa1466760 --- /dev/null +++ b/fs/quota/dquot.c @@ -0,0 +1,2564 @@ +/* + * Implementation of the diskquota system for the LINUX operating system. QUOTA + * is implemented using the BSD system call interface as the means of + * communication with the user level. This file contains the generic routines + * called by the different filesystems on allocation of an inode or block. + * These routines take care of the administration needed to have a consistent + * diskquota tracking system. The ideas of both user and group quotas are based + * on the Melbourne quota system as used on BSD derived systems. The internal + * implementation is based on one of the several variants of the LINUX + * inode-subsystem with added complexity of the diskquota system. + * + * Author: Marco van Wieringen + * + * Fixes: Dmitry Gorodchanin , 11 Feb 96 + * + * Revised list management to avoid races + * -- Bill Hawes, , 9/98 + * + * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). + * As the consequence the locking was moved from dquot_decr_...(), + * dquot_incr_...() to calling functions. + * invalidate_dquots() now writes modified dquots. + * Serialized quota_off() and quota_on() for mount point. + * Fixed a few bugs in grow_dquots(). 
+ * Fixed deadlock in write_dquot() - we no longer account quotas on + * quota files + * remove_dquot_ref() moved to inode.c - it now traverses through inodes + * add_dquot_ref() restarts after blocking + * Added check for bogus uid and fixed check for group in quotactl. + * Jan Kara, , sponsored by SuSE CR, 10-11/99 + * + * Used struct list_head instead of own list struct + * Invalidation of referenced dquots is no longer possible + * Improved free_dquots list management + * Quota and i_blocks are now updated in one place to avoid races + * Warnings are now delayed so we won't block in critical section + * Write updated not to require dquot lock + * Jan Kara, , 9/2000 + * + * Added dynamic quota structure allocation + * Jan Kara 12/2000 + * + * Rewritten quota interface. Implemented new quota format and + * formats registering. + * Jan Kara, , 2001,2002 + * + * New SMP locking. + * Jan Kara, , 10/2002 + * + * Added journalled quota support, fix lock inversion problems + * Jan Kara, , 2003,2004 + * + * (C) Copyright 1994 - 1997 Marco van Wieringen + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for inode_lock, oddly enough.. */ +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE +#include +#include +#endif + +#include + +#define __DQUOT_PARANOIA + +/* + * There are three quota SMP locks. dq_list_lock protects all lists with quotas + * and quota formats, dqstats structure containing statistics about the lists + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and + * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. + * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly + * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects + * modifications of quota state (on quotaon and quotaoff) and readers who care + * about latest values take it as well. + * + * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, + * dq_list_lock > dq_state_lock + * + * Note that some things (eg. sb pointer, type, id) doesn't change during + * the life of the dquot structure and so needn't to be protected by a lock + * + * Any operation working on dquots via inode pointers must hold dqptr_sem. If + * operation is just reading pointers from inode (or not using them at all) the + * read lock is enough. If pointers are altered function must hold write lock + * (these locking rules also apply for S_NOQUOTA flag in the inode - note that + * for altering the flag i_mutex is also needed). + * + * Each dquot has its dq_lock mutex. Locked dquots might not be referenced + * from inodes (dquot_alloc_space() and such don't check the dq_lock). + * Currently dquot is locked only when it is being read to memory (or space for + * it is being allocated) on the first dqget() and when it is being released on + * the last dqput(). The allocation and release oparations are serialized by + * the dq_lock and by checking the use count in dquot_release(). Write + * operations on dquots don't hold dq_lock as they copy data under dq_data_lock + * spinlock to internal buffers before writing. + * + * Lock ordering (including related VFS locks) is the following: + * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > + * dqio_mutex + * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > + * dqptr_sem. 
But filesystem has to count with the fact that functions such as + * dquot_alloc_space() acquire dqptr_sem and they usually have to be called + * from inside a transaction to keep filesystem consistency after a crash. Also + * filesystems usually want to do some IO on dquot from ->mark_dirty which is + * called with dqptr_sem held. + * i_mutex on quota files is special (it's below dqio_mutex) + */ + +static DEFINE_SPINLOCK(dq_list_lock); +static DEFINE_SPINLOCK(dq_state_lock); +DEFINE_SPINLOCK(dq_data_lock); +EXPORT_SYMBOL(dq_data_lock); + +static char *quotatypes[] = INITQFNAMES; +static struct quota_format_type *quota_formats; /* List of registered formats */ +static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; + +/* SLAB cache for dquot structures */ +static struct kmem_cache *dquot_cachep; + +int register_quota_format(struct quota_format_type *fmt) +{ + spin_lock(&dq_list_lock); + fmt->qf_next = quota_formats; + quota_formats = fmt; + spin_unlock(&dq_list_lock); + return 0; +} +EXPORT_SYMBOL(register_quota_format); + +void unregister_quota_format(struct quota_format_type *fmt) +{ + struct quota_format_type **actqf; + + spin_lock(&dq_list_lock); + for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + if (*actqf) + *actqf = (*actqf)->qf_next; + spin_unlock(&dq_list_lock); +} +EXPORT_SYMBOL(unregister_quota_format); + +static struct quota_format_type *find_quota_format(int id) +{ + struct quota_format_type *actqf; + + spin_lock(&dq_list_lock); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (!actqf || !try_module_get(actqf->qf_owner)) { + int qm; + + spin_unlock(&dq_list_lock); + + for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); + if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) + return NULL; + + spin_lock(&dq_list_lock); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (actqf && !try_module_get(actqf->qf_owner)) + actqf = NULL; + } + spin_unlock(&dq_list_lock); + return actqf; +} + +static void put_quota_format(struct quota_format_type *fmt) +{ + module_put(fmt->qf_owner); +} + +/* + * Dquot List Management: + * The quota code uses three lists for dquot management: the inuse_list, + * free_dquots, and dquot_hash[] array. A single dquot structure may be + * on all three lists, depending on its current state. + * + * All dquots are placed to the end of inuse_list when first created, and this + * list is used for invalidate operation, which must look at every dquot. + * + * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, + * and this list is searched whenever we need an available dquot. Dquots are + * removed from the list as soon as they are used again, and + * dqstats.free_dquots gives the number of dquots on the list. When + * dquot is invalidated it's completely released from memory. + * + * Dquots with a specific identity (device, type and id) are placed on + * one of the dquot_hash[] hash chains. This provides an efficient search + * mechanism to locate a specific dquot.
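Seen from the calling filesystem's side, the lock ordering documented above simply means the transaction is opened before any quota call is made. A minimal sketch under that assumption; the myfs_* journalling helpers are hypothetical, only dquot_alloc_space() and NO_QUOTA come from this file:

static int myfs_alloc_data(struct inode *inode, qsize_t bytes)
{
        void *handle;
        int ret = 0;

        /* Journal-lock level: the transaction is opened first... */
        handle = myfs_journal_start(inode);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* ...so dquot_alloc_space() may take dqptr_sem, and dqio_mutex via
         * mark_dquot_dirty(), strictly below the journal lock. */
        if (dquot_alloc_space(inode, bytes, 1) == NO_QUOTA)
                ret = -EDQUOT;

        myfs_journal_stop(handle);
        return ret;
}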
+ */ + +static LIST_HEAD(inuse_list); +static LIST_HEAD(free_dquots); +static unsigned int dq_hash_bits, dq_hash_mask; +static struct hlist_head *dquot_hash; + +struct dqstats dqstats; +EXPORT_SYMBOL(dqstats); + +static inline unsigned int +hashfn(const struct super_block *sb, unsigned int id, int type) +{ + unsigned long tmp; + + tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); + return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; +} + +/* + * Following list functions expect dq_list_lock to be held + */ +static inline void insert_dquot_hash(struct dquot *dquot) +{ + struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); + hlist_add_head(&dquot->dq_hash, head); +} + +static inline void remove_dquot_hash(struct dquot *dquot) +{ + hlist_del_init(&dquot->dq_hash); +} + +static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) +{ + struct hlist_node *node; + struct dquot *dquot; + + hlist_for_each (node, dquot_hash+hashent) { + dquot = hlist_entry(node, struct dquot, dq_hash); + if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) + return dquot; + } + return NODQUOT; +} + +/* Add a dquot to the tail of the free list */ +static inline void put_dquot_last(struct dquot *dquot) +{ + list_add_tail(&dquot->dq_free, &free_dquots); + dqstats.free_dquots++; +} + +static inline void remove_free_dquot(struct dquot *dquot) +{ + if (list_empty(&dquot->dq_free)) + return; + list_del_init(&dquot->dq_free); + dqstats.free_dquots--; +} + +static inline void put_inuse(struct dquot *dquot) +{ + /* We add to the back of inuse list so we don't have to restart + * when traversing this list and we block */ + list_add_tail(&dquot->dq_inuse, &inuse_list); + dqstats.allocated_dquots++; +} + +static inline void remove_inuse(struct dquot *dquot) +{ + dqstats.allocated_dquots--; + list_del(&dquot->dq_inuse); +} +/* + * End of list functions needing dq_list_lock + */ + +static void wait_on_dquot(struct dquot *dquot) +{ + mutex_lock(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); +} + +static inline int dquot_dirty(struct dquot *dquot) +{ + return test_bit(DQ_MOD_B, &dquot->dq_flags); +} + +static inline int mark_dquot_dirty(struct dquot *dquot) +{ + return dquot->dq_sb->dq_op->mark_dirty(dquot); +} + +int dquot_mark_dquot_dirty(struct dquot *dquot) +{ + spin_lock(&dq_list_lock); + if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) + list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> + info[dquot->dq_type].dqi_dirty_list); + spin_unlock(&dq_list_lock); + return 0; +} +EXPORT_SYMBOL(dquot_mark_dquot_dirty); + +/* This function needs dq_list_lock */ +static inline int clear_dquot_dirty(struct dquot *dquot) +{ + if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) + return 0; + list_del_init(&dquot->dq_dirty); + return 1; +} + +void mark_info_dirty(struct super_block *sb, int type) +{ + set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); +} +EXPORT_SYMBOL(mark_info_dirty); + +/* + * Read dquot from disk and alloc space for it + */ + +int dquot_acquire(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dquot->dq_lock); + mutex_lock(&dqopt->dqio_mutex); + if (!test_bit(DQ_READ_B, &dquot->dq_flags)) + ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); + if (ret < 0) + goto out_iolock; + set_bit(DQ_READ_B, &dquot->dq_flags); + /* Instantiate dquot if needed */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && 
!dquot->dq_off) { + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + /* Write the info if needed */ + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret < 0) + goto out_iolock; + if (ret2 < 0) { + ret = ret2; + goto out_iolock; + } + } + set_bit(DQ_ACTIVE_B, &dquot->dq_flags); +out_iolock: + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&dquot->dq_lock); + return ret; +} +EXPORT_SYMBOL(dquot_acquire); + +/* + * Write dquot to disk + */ +int dquot_commit(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dqopt->dqio_mutex); + spin_lock(&dq_list_lock); + if (!clear_dquot_dirty(dquot)) { + spin_unlock(&dq_list_lock); + goto out_sem; + } + spin_unlock(&dq_list_lock); + /* Inactive dquot can be only if there was error during read/init + * => we have better not writing it */ + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret >= 0) + ret = ret2; + } +out_sem: + mutex_unlock(&dqopt->dqio_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_commit); + +/* + * Release dquot + */ +int dquot_release(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dquot->dq_lock); + /* Check whether we are not racing with some other dqget() */ + if (atomic_read(&dquot->dq_count) > 1) + goto out_dqlock; + mutex_lock(&dqopt->dqio_mutex); + if (dqopt->ops[dquot->dq_type]->release_dqblk) { + ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); + /* Write the info */ + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret >= 0) + ret = ret2; + } + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); + mutex_unlock(&dqopt->dqio_mutex); +out_dqlock: + mutex_unlock(&dquot->dq_lock); + return ret; +} +EXPORT_SYMBOL(dquot_release); + +void dquot_destroy(struct dquot *dquot) +{ + kmem_cache_free(dquot_cachep, dquot); +} +EXPORT_SYMBOL(dquot_destroy); + +static inline void do_destroy_dquot(struct dquot *dquot) +{ + dquot->dq_sb->dq_op->destroy_dquot(dquot); +} + +/* Invalidate all dquots on the list. Note that this function is called after + * quota is disabled and pointers from inodes removed so there cannot be new + * quota users. There can still be some users of quotas due to inodes being + * just deleted or pruned by prune_icache() (those are not attached to any + * list) or parallel quotactl call. We have to wait for such users. + */ +static void invalidate_dquots(struct super_block *sb, int type) +{ + struct dquot *dquot, *tmp; + +restart: + spin_lock(&dq_list_lock); + list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { + if (dquot->dq_sb != sb) + continue; + if (dquot->dq_type != type) + continue; + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { + DEFINE_WAIT(wait); + + atomic_inc(&dquot->dq_count); + prepare_to_wait(&dquot->dq_wait_unused, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&dq_list_lock); + /* Once dqput() wakes us up, we know it's time to free + * the dquot. + * IMPORTANT: we rely on the fact that there is always + * at most one process waiting for dquot to free. + * Otherwise dq_count would be > 1 and we would never + * wake up. 
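dquot_acquire(), dquot_commit() and dquot_release() above are designed to be called back from a filesystem's dquot_operations table, usually wrapped in a transaction. A hedged sketch of such a table; the myfs_* wrapper is hypothetical, while the generic helpers and field names are the ones filesystems of this era wire in:

static int myfs_write_dquot(struct dquot *dquot)
{
        int ret;

        /* ...open a transaction here (hypothetical journalling step)... */
        ret = dquot_commit(dquot);
        /* ...close the transaction... */
        return ret;
}

static struct dquot_operations myfs_quota_operations = {
        .initialize     = dquot_initialize,
        .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = myfs_write_dquot,     /* journalled wrapper */
        .acquire_dquot  = dquot_acquire,
        .release_dquot  = dquot_release,
        .mark_dirty     = dquot_mark_dquot_dirty,
        .write_info     = dquot_commit_info,    /* generic info writer */
};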
+ */ + if (atomic_read(&dquot->dq_count) > 1) + schedule(); + finish_wait(&dquot->dq_wait_unused, &wait); + dqput(dquot); + /* At this moment dquot() need not exist (it could be + * reclaimed by prune_dqcache(). Hence we must + * restart. */ + goto restart; + } + /* + * Quota now has no users and it has been written on last + * dqput() + */ + remove_dquot_hash(dquot); + remove_free_dquot(dquot); + remove_inuse(dquot); + do_destroy_dquot(dquot); + } + spin_unlock(&dq_list_lock); +} + +/* Call callback for every active dquot on given filesystem */ +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv) +{ + struct dquot *dquot, *old_dquot = NULL; + int ret = 0; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + spin_lock(&dq_list_lock); + list_for_each_entry(dquot, &inuse_list, dq_inuse) { + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) + continue; + if (dquot->dq_sb != sb) + continue; + /* Now we have active dquot so we can just increase use count */ + atomic_inc(&dquot->dq_count); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + dqput(old_dquot); + old_dquot = dquot; + ret = fn(dquot, priv); + if (ret < 0) + goto out; + spin_lock(&dq_list_lock); + /* We are safe to continue now because our dquot could not + * be moved out of the inuse list while we hold the reference */ + } + spin_unlock(&dq_list_lock); +out: + dqput(old_dquot); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_scan_active); + +int vfs_quota_sync(struct super_block *sb, int type) +{ + struct list_head *dirty; + struct dquot *dquot; + struct quota_info *dqopt = sb_dqopt(sb); + int cnt; + + mutex_lock(&dqopt->dqonoff_mutex); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + spin_lock(&dq_list_lock); + dirty = &dqopt->info[cnt].dqi_dirty_list; + while (!list_empty(dirty)) { + dquot = list_first_entry(dirty, struct dquot, dq_dirty); + /* Dirty and inactive can be only bad dquot... 
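dquot_scan_active() above takes a callback plus an opaque cookie and invokes the callback for every active dquot while holding a reference on it. A minimal usage sketch; the myfs_* names are hypothetical:

static int myfs_sync_one_dquot(struct dquot *dquot, unsigned long priv)
{
        /* 'priv' is unused here; a real caller could pass a cookie through. */
        return dquot->dq_sb->dq_op->write_dquot(dquot);
}

static int myfs_sync_active_dquots(struct super_block *sb)
{
        return dquot_scan_active(sb, myfs_sync_one_dquot, 0);
}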
*/ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + clear_dquot_dirty(dquot); + continue; + } + /* Now we have active dquot from which someone is + * holding reference so we can safely just increase + * use count */ + atomic_inc(&dquot->dq_count); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + sb->dq_op->write_dquot(dquot); + dqput(dquot); + spin_lock(&dq_list_lock); + } + spin_unlock(&dq_list_lock); + } + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) + && info_dirty(&dqopt->info[cnt])) + sb->dq_op->write_info(sb, cnt); + spin_lock(&dq_list_lock); + dqstats.syncs++; + spin_unlock(&dq_list_lock); + mutex_unlock(&dqopt->dqonoff_mutex); + + return 0; +} +EXPORT_SYMBOL(vfs_quota_sync); + +/* Free unused dquots from cache */ +static void prune_dqcache(int count) +{ + struct list_head *head; + struct dquot *dquot; + + head = free_dquots.prev; + while (head != &free_dquots && count) { + dquot = list_entry(head, struct dquot, dq_free); + remove_dquot_hash(dquot); + remove_free_dquot(dquot); + remove_inuse(dquot); + do_destroy_dquot(dquot); + count--; + head = free_dquots.prev; + } +} + +/* + * This is called from kswapd when we think we need some + * more memory + */ + +static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) +{ + if (nr) { + spin_lock(&dq_list_lock); + prune_dqcache(nr); + spin_unlock(&dq_list_lock); + } + return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; +} + +static struct shrinker dqcache_shrinker = { + .shrink = shrink_dqcache_memory, + .seeks = DEFAULT_SEEKS, +}; + +/* + * Put reference to dquot + * NOTE: If you change this function please check whether dqput_blocks() works right... + */ +void dqput(struct dquot *dquot) +{ + int ret; + + if (!dquot) + return; +#ifdef __DQUOT_PARANOIA + if (!atomic_read(&dquot->dq_count)) { + printk("VFS: dqput: trying to free free dquot\n"); + printk("VFS: device %s, dquot of %s %d\n", + dquot->dq_sb->s_id, + quotatypes[dquot->dq_type], + dquot->dq_id); + BUG(); + } +#endif + + spin_lock(&dq_list_lock); + dqstats.drops++; + spin_unlock(&dq_list_lock); +we_slept: + spin_lock(&dq_list_lock); + if (atomic_read(&dquot->dq_count) > 1) { + /* We have more than one user... nothing to do */ + atomic_dec(&dquot->dq_count); + /* Releasing dquot during quotaoff phase? */ + if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && + atomic_read(&dquot->dq_count) == 1) + wake_up(&dquot->dq_wait_unused); + spin_unlock(&dq_list_lock); + return; + } + /* Need to release dquot? */ + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { + spin_unlock(&dq_list_lock); + /* Commit dquot before releasing */ + ret = dquot->dq_sb->dq_op->write_dquot(dquot); + if (ret < 0) { + printk(KERN_ERR "VFS: cannot write quota structure on " + "device %s (error %d). 
Quota may get out of " + "sync!\n", dquot->dq_sb->s_id, ret); + /* + * We clear dirty bit anyway, so that we avoid + * infinite loop here + */ + spin_lock(&dq_list_lock); + clear_dquot_dirty(dquot); + spin_unlock(&dq_list_lock); + } + goto we_slept; + } + /* Clear flag in case dquot was inactive (something bad happened) */ + clear_dquot_dirty(dquot); + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + spin_unlock(&dq_list_lock); + dquot->dq_sb->dq_op->release_dquot(dquot); + goto we_slept; + } + atomic_dec(&dquot->dq_count); +#ifdef __DQUOT_PARANOIA + /* sanity check */ + BUG_ON(!list_empty(&dquot->dq_free)); +#endif + put_dquot_last(dquot); + spin_unlock(&dq_list_lock); +} +EXPORT_SYMBOL(dqput); + +struct dquot *dquot_alloc(struct super_block *sb, int type) +{ + return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); +} +EXPORT_SYMBOL(dquot_alloc); + +static struct dquot *get_empty_dquot(struct super_block *sb, int type) +{ + struct dquot *dquot; + + dquot = sb->dq_op->alloc_dquot(sb, type); + if(!dquot) + return NODQUOT; + + mutex_init(&dquot->dq_lock); + INIT_LIST_HEAD(&dquot->dq_free); + INIT_LIST_HEAD(&dquot->dq_inuse); + INIT_HLIST_NODE(&dquot->dq_hash); + INIT_LIST_HEAD(&dquot->dq_dirty); + init_waitqueue_head(&dquot->dq_wait_unused); + dquot->dq_sb = sb; + dquot->dq_type = type; + atomic_set(&dquot->dq_count, 1); + + return dquot; +} + +/* + * Get reference to dquot + * + * Locking is slightly tricky here. We are guarded from parallel quotaoff() + * destroying our dquot by: + * a) checking for quota flags under dq_list_lock and + * b) getting a reference to dquot before we release dq_list_lock + */ +struct dquot *dqget(struct super_block *sb, unsigned int id, int type) +{ + unsigned int hashent = hashfn(sb, id, type); + struct dquot *dquot = NODQUOT, *empty = NODQUOT; + + if (!sb_has_quota_active(sb, type)) + return NODQUOT; +we_slept: + spin_lock(&dq_list_lock); + spin_lock(&dq_state_lock); + if (!sb_has_quota_active(sb, type)) { + spin_unlock(&dq_state_lock); + spin_unlock(&dq_list_lock); + goto out; + } + spin_unlock(&dq_state_lock); + + if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { + if (empty == NODQUOT) { + spin_unlock(&dq_list_lock); + if ((empty = get_empty_dquot(sb, type)) == NODQUOT) + schedule(); /* Try to wait for a moment... */ + goto we_slept; + } + dquot = empty; + empty = NODQUOT; + dquot->dq_id = id; + /* all dquots go on the inuse_list */ + put_inuse(dquot); + /* hash it first so it can be found */ + insert_dquot_hash(dquot); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + } else { + if (!atomic_read(&dquot->dq_count)) + remove_free_dquot(dquot); + atomic_inc(&dquot->dq_count); + dqstats.cache_hits++; + dqstats.lookups++; + spin_unlock(&dq_list_lock); + } + /* Wait for dq_lock - after this we know that either dquot_release() is already + * finished or it will be canceled due to dq_count > 1 test */ + wait_on_dquot(dquot); + /* Read the dquot and instantiate it (everything done only if needed) */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { + dqput(dquot); + dquot = NODQUOT; + goto out; + } +#ifdef __DQUOT_PARANOIA + BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 
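The reference-counting contract of dqget()/dqput() above boils down to this: every non-NODQUOT return from dqget() pins the structure until the matching dqput(), and dq_dqb fields are read under dq_data_lock. A minimal, hypothetical sketch:

static qsize_t myfs_user_space_used(struct super_block *sb, qid_t uid)
{
        struct dquot *dquot = dqget(sb, uid, USRQUOTA);
        qsize_t used;

        if (dquot == NODQUOT)           /* quota off or allocation failure */
                return 0;
        spin_lock(&dq_data_lock);
        used = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
        spin_unlock(&dq_data_lock);
        dqput(dquot);
        return used;
}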
*/ +#endif +out: + if (empty) + do_destroy_dquot(empty); + + return dquot; +} +EXPORT_SYMBOL(dqget); + +static int dqinit_needed(struct inode *inode, int type) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + return 0; + if (type != -1) + return inode->i_dquot[type] == NODQUOT; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt] == NODQUOT) + return 1; + return 0; +} + +/* This routine is guarded by dqonoff_mutex mutex */ +static void add_dquot_ref(struct super_block *sb, int type) +{ + struct inode *inode, *old_inode = NULL; + + spin_lock(&inode_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + if (!atomic_read(&inode->i_writecount)) + continue; + if (!dqinit_needed(inode, type)) + continue; + if (inode->i_state & (I_FREEING|I_WILL_FREE)) + continue; + + __iget(inode); + spin_unlock(&inode_lock); + + iput(old_inode); + sb->dq_op->initialize(inode, type); + /* We hold a reference to 'inode' so it couldn't have been + * removed from s_inodes list while we dropped the inode_lock. + * We cannot iput the inode now as we can be holding the last + * reference and we cannot iput it under inode_lock. So we + * keep the reference and iput it later. */ + old_inode = inode; + spin_lock(&inode_lock); + } + spin_unlock(&inode_lock); + iput(old_inode); +} + +/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ +static inline int dqput_blocks(struct dquot *dquot) +{ + if (atomic_read(&dquot->dq_count) <= 1) + return 1; + return 0; +} + +/* Remove references to dquots from inode - add dquot to list for freeing if needed */ +/* We can't race with anybody because we hold dqptr_sem for writing... */ +static int remove_inode_dquot_ref(struct inode *inode, int type, + struct list_head *tofree_head) +{ + struct dquot *dquot = inode->i_dquot[type]; + + inode->i_dquot[type] = NODQUOT; + if (dquot != NODQUOT) { + if (dqput_blocks(dquot)) { +#ifdef __DQUOT_PARANOIA + if (atomic_read(&dquot->dq_count) != 1) + printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); +#endif + spin_lock(&dq_list_lock); + list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ + spin_unlock(&dq_list_lock); + return 1; + } + else + dqput(dquot); /* We have guaranteed we won't block */ + } + return 0; +} + +/* Free list of dquots - called from inode.c */ +/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ +static void put_dquot_list(struct list_head *tofree_head) +{ + struct list_head *act_head; + struct dquot *dquot; + + act_head = tofree_head->next; + /* So now we have dquots on the list... Just free them */ + while (act_head != tofree_head) { + dquot = list_entry(act_head, struct dquot, dq_free); + act_head = act_head->next; + list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... 
*/ + dqput(dquot); + } +} + +static void remove_dquot_ref(struct super_block *sb, int type, + struct list_head *tofree_head) +{ + struct inode *inode; + + spin_lock(&inode_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + if (!IS_NOQUOTA(inode)) + remove_inode_dquot_ref(inode, type, tofree_head); + } + spin_unlock(&inode_lock); +} + +/* Gather all references from inodes and drop them */ +static void drop_dquot_ref(struct super_block *sb, int type) +{ + LIST_HEAD(tofree_head); + + if (sb->dq_op) { + down_write(&sb_dqopt(sb)->dqptr_sem); + remove_dquot_ref(sb, type, &tofree_head); + up_write(&sb_dqopt(sb)->dqptr_sem); + put_dquot_list(&tofree_head); + } +} + +static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_curinodes += number; +} + +static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_curspace += number; +} + +static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace += number; +} + +/* + * Claim reserved quota space + */ +static void dquot_claim_reserved_space(struct dquot *dquot, + qsize_t number) +{ + WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); + dquot->dq_dqb.dqb_curspace += number; + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline +void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) +{ + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curinodes >= number) + dquot->dq_dqb.dqb_curinodes -= number; + else + dquot->dq_dqb.dqb_curinodes = 0; + if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) + dquot->dq_dqb.dqb_itime = (time_t) 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); +} + +static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) +{ + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curspace >= number) + dquot->dq_dqb.dqb_curspace -= number; + else + dquot->dq_dqb.dqb_curspace = 0; + if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) + dquot->dq_dqb.dqb_btime = (time_t) 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); +} + +static int warning_issued(struct dquot *dquot, const int warntype) +{ + int flag = (warntype == QUOTA_NL_BHARDWARN || + warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : + ((warntype == QUOTA_NL_IHARDWARN || + warntype == QUOTA_NL_ISOFTLONGWARN) ? 
DQ_INODES_B : 0); + + if (!flag) + return 0; + return test_and_set_bit(flag, &dquot->dq_flags); +} + +#ifdef CONFIG_PRINT_QUOTA_WARNING +static int flag_print_warnings = 1; + +static inline int need_print_warning(struct dquot *dquot) +{ + if (!flag_print_warnings) + return 0; + + switch (dquot->dq_type) { + case USRQUOTA: + return current_fsuid() == dquot->dq_id; + case GRPQUOTA: + return in_group_p(dquot->dq_id); + } + return 0; +} + +/* Print warning to user which exceeded quota */ +static void print_warning(struct dquot *dquot, const int warntype) +{ + char *msg = NULL; + struct tty_struct *tty; + + if (warntype == QUOTA_NL_IHARDBELOW || + warntype == QUOTA_NL_ISOFTBELOW || + warntype == QUOTA_NL_BHARDBELOW || + warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) + return; + + tty = get_current_tty(); + if (!tty) + return; + tty_write_message(tty, dquot->dq_sb->s_id); + if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) + tty_write_message(tty, ": warning, "); + else + tty_write_message(tty, ": write failed, "); + tty_write_message(tty, quotatypes[dquot->dq_type]); + switch (warntype) { + case QUOTA_NL_IHARDWARN: + msg = " file limit reached.\r\n"; + break; + case QUOTA_NL_ISOFTLONGWARN: + msg = " file quota exceeded too long.\r\n"; + break; + case QUOTA_NL_ISOFTWARN: + msg = " file quota exceeded.\r\n"; + break; + case QUOTA_NL_BHARDWARN: + msg = " block limit reached.\r\n"; + break; + case QUOTA_NL_BSOFTLONGWARN: + msg = " block quota exceeded too long.\r\n"; + break; + case QUOTA_NL_BSOFTWARN: + msg = " block quota exceeded.\r\n"; + break; + } + tty_write_message(tty, msg); + tty_kref_put(tty); +} +#endif + +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE + +/* Netlink family structure for quota */ +static struct genl_family quota_genl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = 0, + .name = "VFS_DQUOT", + .version = 1, + .maxattr = QUOTA_NL_A_MAX, +}; + +/* Send warning to userspace about user which exceeded quota */ +static void send_warning(const struct dquot *dquot, const char warntype) +{ + static atomic_t seq; + struct sk_buff *skb; + void *msg_head; + int ret; + int msg_size = 4 * nla_total_size(sizeof(u32)) + + 2 * nla_total_size(sizeof(u64)); + + /* We have to allocate using GFP_NOFS as we are called from a + * filesystem performing write and thus further recursion into + * the fs to free some data could cause deadlocks. 
*/ + skb = genlmsg_new(msg_size, GFP_NOFS); + if (!skb) { + printk(KERN_ERR + "VFS: Not enough memory to send quota warning.\n"); + return; + } + msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), + "a_genl_family, 0, QUOTA_NL_C_WARNING); + if (!msg_head) { + printk(KERN_ERR + "VFS: Cannot store netlink header in quota warning.\n"); + goto err_out; + } + ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); + if (ret) + goto attr_err_out; + ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, + MAJOR(dquot->dq_sb->s_dev)); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, + MINOR(dquot->dq_sb->s_dev)); + if (ret) + goto attr_err_out; + ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); + if (ret) + goto attr_err_out; + genlmsg_end(skb, msg_head); + + ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); + if (ret < 0 && ret != -ESRCH) + printk(KERN_ERR + "VFS: Failed to send notification message: %d\n", ret); + return; +attr_err_out: + printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); +err_out: + kfree_skb(skb); +} +#endif +/* + * Write warnings to the console and send warning messages over netlink. + * + * Note that this function can sleep. + */ +static inline void flush_warnings(struct dquot * const *dquots, char *warntype) +{ + int i; + + for (i = 0; i < MAXQUOTAS; i++) + if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && + !warning_issued(dquots[i], warntype[i])) { +#ifdef CONFIG_PRINT_QUOTA_WARNING + print_warning(dquots[i], warntype[i]); +#endif +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE + send_warning(dquots[i], warntype[i]); +#endif + } +} + +static inline char ignore_hardlimit(struct dquot *dquot) +{ + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + return capable(CAP_SYS_RESOURCE) && + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); +} + +/* needs dq_data_lock */ +static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) +{ + *warntype = QUOTA_NL_NOWARN; + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) + return QUOTA_OK; + + if (dquot->dq_dqb.dqb_ihardlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && + !ignore_hardlimit(dquot)) { + *warntype = QUOTA_NL_IHARDWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && + !ignore_hardlimit(dquot)) { + *warntype = QUOTA_NL_ISOFTLONGWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime == 0) { + *warntype = QUOTA_NL_ISOFTWARN; + dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + } + + return QUOTA_OK; +} + +/* needs dq_data_lock */ +static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) +{ + qsize_t tspace; + + *warntype = QUOTA_NL_NOWARN; + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) + return QUOTA_OK; + + tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace + + space; + + if 
(dquot->dq_dqb.dqb_bhardlimit && + tspace > dquot->dq_dqb.dqb_bhardlimit && + !ignore_hardlimit(dquot)) { + if (!prealloc) + *warntype = QUOTA_NL_BHARDWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && + !ignore_hardlimit(dquot)) { + if (!prealloc) + *warntype = QUOTA_NL_BSOFTLONGWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime == 0) { + if (!prealloc) { + *warntype = QUOTA_NL_BSOFTWARN; + dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + } + else + /* + * We don't allow preallocation to exceed softlimit so exceeding will + * be always printed + */ + return NO_QUOTA; + } + + return QUOTA_OK; +} + +static int info_idq_free(struct dquot *dquot, qsize_t inodes) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || + dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || + !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) + return QUOTA_NL_NOWARN; + + if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) + return QUOTA_NL_ISOFTBELOW; + if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && + dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) + return QUOTA_NL_IHARDBELOW; + return QUOTA_NL_NOWARN; +} + +static int info_bdq_free(struct dquot *dquot, qsize_t space) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || + dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) + return QUOTA_NL_NOWARN; + + if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) + return QUOTA_NL_BSOFTBELOW; + if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && + dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) + return QUOTA_NL_BHARDBELOW; + return QUOTA_NL_NOWARN; +} +/* + * Initialize quota pointers in inode + * We do things in a bit complicated way but by that we avoid calling + * dqget() and thus filesystem callbacks under dqptr_sem. + */ +int dquot_initialize(struct inode *inode, int type) +{ + unsigned int id = 0; + int cnt, ret = 0; + struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; + struct super_block *sb = inode->i_sb; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return 0; + + /* First get references to structures we might need. */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + switch (cnt) { + case USRQUOTA: + id = inode->i_uid; + break; + case GRPQUOTA: + id = inode->i_gid; + break; + } + got[cnt] = dqget(sb, id, cnt); + } + + down_write(&sb_dqopt(sb)->dqptr_sem); + /* Having dqptr_sem we know NOQUOTA flags can't be altered... 
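To make the reservation arithmetic in check_bdq() above concrete (the numbers are purely illustrative): with dqb_bhardlimit = 100 MB, dqb_curspace = 60 MB and dqb_rsvspace = 30 MB, a further 20 MB request gives tspace = 60 + 30 + 20 = 110 MB, which exceeds the hard limit, so NO_QUOTA is returned even though the allocated space plus the request alone (80 MB) would still fit. Outstanding in-memory reservations therefore count against the limits exactly like allocated blocks.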
*/ + if (IS_NOQUOTA(inode)) + goto out_err; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + /* Avoid races with quotaoff() */ + if (!sb_has_quota_active(sb, cnt)) + continue; + if (inode->i_dquot[cnt] == NODQUOT) { + inode->i_dquot[cnt] = got[cnt]; + got[cnt] = NODQUOT; + } + } +out_err: + up_write(&sb_dqopt(sb)->dqptr_sem); + /* Drop unused references */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + dqput(got[cnt]); + return ret; +} +EXPORT_SYMBOL(dquot_initialize); + +/* + * Release all quotas referenced by inode + */ +int dquot_drop(struct inode *inode) +{ + int cnt; + struct dquot *put[MAXQUOTAS]; + + down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + put[cnt] = inode->i_dquot[cnt]; + inode->i_dquot[cnt] = NODQUOT; + } + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + dqput(put[cnt]); + return 0; +} +EXPORT_SYMBOL(dquot_drop); + +/* Wrapper to remove references to quota structures from inode */ +void vfs_dq_drop(struct inode *inode) +{ + /* Here we can get arbitrary inode from clear_inode() so we have + * to be careful. OTOH we don't need locking as quota operations + * are allowed to change only at mount time */ + if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op + && inode->i_sb->dq_op->drop) { + int cnt; + /* Test before calling to rule out calls from proc and such + * where we are not allowed to block. Note that this is + * actually reliable test even without the lock - the caller + * must assure that nobody can come after the DQUOT_DROP and + * add quota pointers back anyway */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt] != NODQUOT) + break; + if (cnt < MAXQUOTAS) + inode->i_sb->dq_op->drop(inode); + } +} +EXPORT_SYMBOL(vfs_dq_drop); + +/* + * Following four functions update i_blocks+i_bytes fields and + * quota information (together with appropriate checks) + * NOTE: We absolutely rely on the fact that caller dirties + * the inode (usually macros in quotaops.h care about this) and + * holds a handle for the current transaction so that dquot write and + * inode write go into the same transaction. 
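The reservation entry points defined below are meant to be used in two phases by a delayed-allocation filesystem: reserve quota when a write is accepted into the page cache, then either claim it once blocks are really allocated at writeback time or hand it back on failure. A minimal sketch under that assumption; myfs_allocate_blocks() and the other myfs_* names are hypothetical:

static int myfs_write_begin(struct inode *inode, qsize_t bytes)
{
        /* Delayed allocation: no blocks yet, only reserve quota in memory. */
        if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
                return -EDQUOT;
        return 0;
}

static int myfs_writeback(struct inode *inode, qsize_t bytes)
{
        int err = myfs_allocate_blocks(inode, bytes);   /* hypothetical */

        if (err) {
                /* Allocation failed: return the in-memory reservation. */
                dquot_release_reserved_space(inode, bytes);
                return err;
        }
        /* Blocks exist now: move dqb_rsvspace into dqb_curspace and
         * account the bytes to the inode. */
        dquot_claim_space(inode, bytes);
        return 0;
}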
+ */ + +/* + * This operation can block, but only after everything is updated + */ +int __dquot_alloc_space(struct inode *inode, qsize_t number, + int warn, int reserve) +{ + int cnt, ret = QUOTA_OK; + char warntype[MAXQUOTAS]; + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + warntype[cnt] = QUOTA_NL_NOWARN; + + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) + == NO_QUOTA) { + ret = NO_QUOTA; + goto out_unlock; + } + } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (reserve) + dquot_resv_space(inode->i_dquot[cnt], number); + else + dquot_incr_space(inode->i_dquot[cnt], number); + } + if (!reserve) + inode_add_bytes(inode, number); +out_unlock: + spin_unlock(&dq_data_lock); + flush_warnings(inode->i_dquot, warntype); + return ret; +} + +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +{ + int cnt, ret = QUOTA_OK; + + /* + * First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex + */ + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out_unlock; + } + + ret = __dquot_alloc_space(inode, number, warn, 0); + if (ret == NO_QUOTA) + goto out_unlock; + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_alloc_space); + +int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) +{ + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + ret = __dquot_alloc_space(inode, number, warn, 1); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_reserve_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_alloc_inode(const struct inode *inode, qsize_t number) +{ + int cnt, ret = NO_QUOTA; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + warntype[cnt] = QUOTA_NL_NOWARN; + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) + goto warn_put_all; + } + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + dquot_incr_inodes(inode->i_dquot[cnt], number); + } + ret = QUOTA_OK; +warn_put_all: + spin_unlock(&dq_data_lock); + if (ret == QUOTA_OK) + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return ret; +} +EXPORT_SYMBOL(dquot_alloc_inode); + +int 
dquot_claim_space(struct inode *inode, qsize_t number) +{ + int cnt; + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + inode_add_bytes(inode, number); + goto out; + } + + spin_lock(&dq_data_lock); + /* Claim reserved quotas to allocated quotas */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_claim_reserved_space(inode->i_dquot[cnt], + number); + } + /* Update inode bytes */ + inode_add_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_claim_space); + +/* + * Release reserved quota space + */ +void dquot_release_reserved_space(struct inode *inode, qsize_t number) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + spin_lock(&dq_data_lock); + /* Release reserved dquots */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_free_reserved_space(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return; +} +EXPORT_SYMBOL(dquot_release_reserved_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_free_space(struct inode *inode, qsize_t number) +{ + unsigned int cnt; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) { +out_sub: + inode_sub_bytes(inode, number); + return QUOTA_OK; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + goto out_sub; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); + dquot_decr_space(inode->i_dquot[cnt], number); + } + inode_sub_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; +} +EXPORT_SYMBOL(dquot_free_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_free_inode(const struct inode *inode, qsize_t number) +{ + unsigned int cnt; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); + 
dquot_decr_inodes(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; +} +EXPORT_SYMBOL(dquot_free_inode); + +/* + * call back function, get reserved quota space from underlying fs + */ +qsize_t dquot_get_reserved_space(struct inode *inode) +{ + qsize_t reserved_space = 0; + + if (sb_any_quota_active(inode->i_sb) && + inode->i_sb->dq_op->get_reserved_space) + reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); + return reserved_space; +} + +/* + * Transfer the number of inode and blocks from one diskquota to an other. + * + * This operation can block, but only after everything is updated + * A transaction must be started when entering this function. + */ +int dquot_transfer(struct inode *inode, struct iattr *iattr) +{ + qsize_t space, cur_space; + qsize_t rsv_space = 0; + struct dquot *transfer_from[MAXQUOTAS]; + struct dquot *transfer_to[MAXQUOTAS]; + int cnt, ret = QUOTA_OK; + int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, + chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; + char warntype_to[MAXQUOTAS]; + char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + /* Initialize the arrays */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + transfer_from[cnt] = NODQUOT; + transfer_to[cnt] = NODQUOT; + warntype_to[cnt] = QUOTA_NL_NOWARN; + switch (cnt) { + case USRQUOTA: + if (!chuid) + continue; + transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); + break; + case GRPQUOTA: + if (!chgid) + continue; + transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); + break; + } + } + + down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + goto put_all; + } + spin_lock(&dq_data_lock); + cur_space = inode_get_bytes(inode); + rsv_space = dquot_get_reserved_space(inode); + space = cur_space + rsv_space; + /* Build the transfer_from list and check the limits */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (transfer_to[cnt] == NODQUOT) + continue; + transfer_from[cnt] = inode->i_dquot[cnt]; + if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == + NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, + warntype_to + cnt) == NO_QUOTA) + goto over_quota; + } + + /* + * Finally perform the needed transfer from transfer_from to transfer_to + */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + /* + * Skip changes for same uid or gid or for turned off quota-type. 
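+ * (transfer_to[cnt] was left as NODQUOT above in those cases)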
+ */ + if (transfer_to[cnt] == NODQUOT) + continue; + + /* Due to IO error we might not have transfer_from[] structure */ + if (transfer_from[cnt]) { + warntype_from_inodes[cnt] = + info_idq_free(transfer_from[cnt], 1); + warntype_from_space[cnt] = + info_bdq_free(transfer_from[cnt], space); + dquot_decr_inodes(transfer_from[cnt], 1); + dquot_decr_space(transfer_from[cnt], cur_space); + dquot_free_reserved_space(transfer_from[cnt], + rsv_space); + } + + dquot_incr_inodes(transfer_to[cnt], 1); + dquot_incr_space(transfer_to[cnt], cur_space); + dquot_resv_space(transfer_to[cnt], rsv_space); + + inode->i_dquot[cnt] = transfer_to[cnt]; + } + spin_unlock(&dq_data_lock); + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (transfer_from[cnt]) + mark_dquot_dirty(transfer_from[cnt]); + if (transfer_to[cnt]) { + mark_dquot_dirty(transfer_to[cnt]); + /* The reference we got is transferred to the inode */ + transfer_to[cnt] = NODQUOT; + } + } +warn_put_all: + flush_warnings(transfer_to, warntype_to); + flush_warnings(transfer_from, warntype_from_inodes); + flush_warnings(transfer_from, warntype_from_space); +put_all: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + dqput(transfer_from[cnt]); + dqput(transfer_to[cnt]); + } + return ret; +over_quota: + spin_unlock(&dq_data_lock); + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Clear dquot pointers we don't want to dqput() */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + transfer_from[cnt] = NODQUOT; + ret = NO_QUOTA; + goto warn_put_all; +} +EXPORT_SYMBOL(dquot_transfer); + +/* Wrapper for transferring ownership of an inode */ +int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) +{ + if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { + vfs_dq_init(inode); + if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) + return 1; + } + return 0; +} +EXPORT_SYMBOL(vfs_dq_transfer); + +/* + * Write info of quota file to disk + */ +int dquot_commit_info(struct super_block *sb, int type) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(sb); + + mutex_lock(&dqopt->dqio_mutex); + ret = dqopt->ops[type]->write_file_info(sb, type); + mutex_unlock(&dqopt->dqio_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_commit_info); + +/* + * Definitions of diskquota operations. + */ +struct dquot_operations dquot_operations = { + .initialize = dquot_initialize, + .drop = dquot_drop, + .alloc_space = dquot_alloc_space, + .alloc_inode = dquot_alloc_inode, + .free_space = dquot_free_space, + .free_inode = dquot_free_inode, + .transfer = dquot_transfer, + .write_dquot = dquot_commit, + .acquire_dquot = dquot_acquire, + .release_dquot = dquot_release, + .mark_dirty = dquot_mark_dquot_dirty, + .write_info = dquot_commit_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, +}; + +/* + * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) + */ +int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) +{ + int cnt, ret = 0; + struct quota_info *dqopt = sb_dqopt(sb); + struct inode *toputinode[MAXQUOTAS]; + + /* Cannot turn off usage accounting without turning off limits, or + * suspend quotas and simultaneously turn quotas off. 
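+ * Such flag combinations are rejected with -EINVAL below.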
*/ + if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) + || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | + DQUOT_USAGE_ENABLED))) + return -EINVAL; + + /* We need to serialize quota_off() for device */ + mutex_lock(&dqopt->dqonoff_mutex); + + /* + * Skip everything if there's nothing to do. We have to do this because + * sometimes we are called when fill_super() failed and calling + * sync_fs() in such cases does no good. + */ + if (!sb_any_quota_loaded(sb)) { + mutex_unlock(&dqopt->dqonoff_mutex); + return 0; + } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + toputinode[cnt] = NULL; + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_loaded(sb, cnt)) + continue; + + if (flags & DQUOT_SUSPENDED) { + spin_lock(&dq_state_lock); + dqopt->flags |= + dquot_state_flag(DQUOT_SUSPENDED, cnt); + spin_unlock(&dq_state_lock); + } else { + spin_lock(&dq_state_lock); + dqopt->flags &= ~dquot_state_flag(flags, cnt); + /* Turning off suspended quotas? */ + if (!sb_has_quota_loaded(sb, cnt) && + sb_has_quota_suspended(sb, cnt)) { + dqopt->flags &= ~dquot_state_flag( + DQUOT_SUSPENDED, cnt); + spin_unlock(&dq_state_lock); + iput(dqopt->files[cnt]); + dqopt->files[cnt] = NULL; + continue; + } + spin_unlock(&dq_state_lock); + } + + /* We still have to keep quota loaded? */ + if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) + continue; + + /* Note: these are blocking operations */ + drop_dquot_ref(sb, cnt); + invalidate_dquots(sb, cnt); + /* + * Now all dquots should be invalidated, all writes done so we should be only + * users of the info. No locks needed. + */ + if (info_dirty(&dqopt->info[cnt])) + sb->dq_op->write_info(sb, cnt); + if (dqopt->ops[cnt]->free_file_info) + dqopt->ops[cnt]->free_file_info(sb, cnt); + put_quota_format(dqopt->info[cnt].dqi_format); + + toputinode[cnt] = dqopt->files[cnt]; + if (!sb_has_quota_loaded(sb, cnt)) + dqopt->files[cnt] = NULL; + dqopt->info[cnt].dqi_flags = 0; + dqopt->info[cnt].dqi_igrace = 0; + dqopt->info[cnt].dqi_bgrace = 0; + dqopt->ops[cnt] = NULL; + } + mutex_unlock(&dqopt->dqonoff_mutex); + + /* Skip syncing and setting flags if quota files are hidden */ + if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) + goto put_inodes; + + /* Sync the superblock so that buffers with quota data are written to + * disk (and so userspace sees correct data afterwards). */ + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, 1); + sync_blockdev(sb->s_bdev); + /* Now the quota files are just ordinary files and we can set the + * inode flags back. Moreover we discard the pagecache so that + * userspace sees the writes we did bypassing the pagecache. We + * must also discard the blockdev buffers so that we see the + * changes done by userspace on the next quotaon() */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (toputinode[cnt]) { + mutex_lock(&dqopt->dqonoff_mutex); + /* If quota was reenabled in the meantime, we have + * nothing to do */ + if (!sb_has_quota_loaded(sb, cnt)) { + mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); + toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | + S_NOATIME | S_NOQUOTA); + truncate_inode_pages(&toputinode[cnt]->i_data, 0); + mutex_unlock(&toputinode[cnt]->i_mutex); + mark_inode_dirty(toputinode[cnt]); + } + mutex_unlock(&dqopt->dqonoff_mutex); + } + if (sb->s_bdev) + invalidate_bdev(sb->s_bdev); +put_inodes: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (toputinode[cnt]) { + /* On remount RO, we keep the inode pointer so that we + * can reenable quota on the subsequent remount RW. 
We + * have to check 'flags' variable and not use sb_has_ + * function because another quotaon / quotaoff could + * change global state before we got here. We refuse + * to suspend quotas when there is pending delete on + * the quota file... */ + if (!(flags & DQUOT_SUSPENDED)) + iput(toputinode[cnt]); + else if (!toputinode[cnt]->i_nlink) + ret = -EBUSY; + } + return ret; +} +EXPORT_SYMBOL(vfs_quota_disable); + +int vfs_quota_off(struct super_block *sb, int type, int remount) +{ + return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : + (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); +} +EXPORT_SYMBOL(vfs_quota_off); +/* + * Turn quotas on on a device + */ + +/* + * Helper function to turn quotas on when we already have the inode of + * quota file and no quota information is loaded. + */ +static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, + unsigned int flags) +{ + struct quota_format_type *fmt = find_quota_format(format_id); + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + int error; + int oldflags = -1; + + if (!fmt) + return -ESRCH; + if (!S_ISREG(inode->i_mode)) { + error = -EACCES; + goto out_fmt; + } + if (IS_RDONLY(inode)) { + error = -EROFS; + goto out_fmt; + } + if (!sb->s_op->quota_write || !sb->s_op->quota_read) { + error = -EINVAL; + goto out_fmt; + } + /* Usage always has to be set... */ + if (!(flags & DQUOT_USAGE_ENABLED)) { + error = -EINVAL; + goto out_fmt; + } + + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* As we bypass the pagecache we must now flush the inode so + * that we see all the changes from userspace... */ + write_inode_now(inode, 1); + /* And now flush the block cache so that kernel sees the + * changes */ + invalidate_bdev(sb->s_bdev); + } + mutex_lock(&inode->i_mutex); + mutex_lock(&dqopt->dqonoff_mutex); + if (sb_has_quota_loaded(sb, type)) { + error = -EBUSY; + goto out_lock; + } + + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* We don't want quota and atime on quota files (deadlocks + * possible) Also nobody should write to the file - we use + * special IO operations which ignore the immutable bit. 
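+ * (the ->quota_read / ->quota_write methods access the file directly, bypassing the page cache)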
*/ + down_write(&dqopt->dqptr_sem); + oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); + inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; + up_write(&dqopt->dqptr_sem); + sb->dq_op->drop(inode); + } + + error = -EIO; + dqopt->files[type] = igrab(inode); + if (!dqopt->files[type]) + goto out_lock; + error = -EINVAL; + if (!fmt->qf_ops->check_quota_file(sb, type)) + goto out_file_init; + + dqopt->ops[type] = fmt->qf_ops; + dqopt->info[type].dqi_format = fmt; + dqopt->info[type].dqi_fmt_id = format_id; + INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); + mutex_lock(&dqopt->dqio_mutex); + if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { + mutex_unlock(&dqopt->dqio_mutex); + goto out_file_init; + } + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&inode->i_mutex); + spin_lock(&dq_state_lock); + dqopt->flags |= dquot_state_flag(flags, type); + spin_unlock(&dq_state_lock); + + add_dquot_ref(sb, type); + mutex_unlock(&dqopt->dqonoff_mutex); + + return 0; + +out_file_init: + dqopt->files[type] = NULL; + iput(inode); +out_lock: + mutex_unlock(&dqopt->dqonoff_mutex); + if (oldflags != -1) { + down_write(&dqopt->dqptr_sem); + /* Set the flags back (in the case of accidental quotaon() + * on a wrong file we don't want to mess up the flags) */ + inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); + inode->i_flags |= oldflags; + up_write(&dqopt->dqptr_sem); + } + mutex_unlock(&inode->i_mutex); +out_fmt: + put_quota_format(fmt); + + return error; +} + +/* Reenable quotas on remount RW */ +static int vfs_quota_on_remount(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct inode *inode; + int ret; + unsigned int flags; + + mutex_lock(&dqopt->dqonoff_mutex); + if (!sb_has_quota_suspended(sb, type)) { + mutex_unlock(&dqopt->dqonoff_mutex); + return 0; + } + inode = dqopt->files[type]; + dqopt->files[type] = NULL; + spin_lock(&dq_state_lock); + flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED, type); + dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); + spin_unlock(&dq_state_lock); + mutex_unlock(&dqopt->dqonoff_mutex); + + flags = dquot_generic_flag(flags, type); + ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, + flags); + iput(inode); + + return ret; +} + +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path) +{ + int error = security_quota_on(path->dentry); + if (error) + return error; + /* Quota file not on the same filesystem? */ + if (path->mnt->mnt_sb != sb) + error = -EXDEV; + else + error = vfs_load_quota_inode(path->dentry->d_inode, type, + format_id, DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED); + return error; +} +EXPORT_SYMBOL(vfs_quota_on_path); + +int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, + int remount) +{ + struct path path; + int error; + + if (remount) + return vfs_quota_on_remount(sb, type); + + error = kern_path(name, LOOKUP_FOLLOW, &path); + if (!error) { + error = vfs_quota_on_path(sb, type, format_id, &path); + path_put(&path); + } + return error; +} +EXPORT_SYMBOL(vfs_quota_on); + +/* + * More powerful function for turning on quotas allowing setting + * of individual quota flags + */ +int vfs_quota_enable(struct inode *inode, int type, int format_id, + unsigned int flags) +{ + int ret = 0; + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + + /* Just unsuspend quotas? 
*/ + if (flags & DQUOT_SUSPENDED) + return vfs_quota_on_remount(sb, type); + if (!flags) + return 0; + /* Just updating flags needed? */ + if (sb_has_quota_loaded(sb, type)) { + mutex_lock(&dqopt->dqonoff_mutex); + /* Now do a reliable test... */ + if (!sb_has_quota_loaded(sb, type)) { + mutex_unlock(&dqopt->dqonoff_mutex); + goto load_quota; + } + if (flags & DQUOT_USAGE_ENABLED && + sb_has_quota_usage_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + if (flags & DQUOT_LIMITS_ENABLED && + sb_has_quota_limits_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + spin_lock(&dq_state_lock); + sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); + spin_unlock(&dq_state_lock); +out_lock: + mutex_unlock(&dqopt->dqonoff_mutex); + return ret; + } + +load_quota: + return vfs_load_quota_inode(inode, type, format_id, flags); +} +EXPORT_SYMBOL(vfs_quota_enable); + +/* + * This function is used when filesystem needs to initialize quotas + * during mount time. + */ +int vfs_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type) +{ + struct dentry *dentry; + int error; + + dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (!dentry->d_inode) { + error = -ENOENT; + goto out; + } + + error = security_quota_on(dentry); + if (!error) + error = vfs_load_quota_inode(dentry->d_inode, type, format_id, + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + +out: + dput(dentry); + return error; +} +EXPORT_SYMBOL(vfs_quota_on_mount); + +/* Wrapper to turn on quotas when remounting rw */ +int vfs_dq_quota_on_remount(struct super_block *sb) +{ + int cnt; + int ret = 0, err; + + if (!sb->s_qcop || !sb->s_qcop->quota_on) + return -ENOSYS; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); + if (err < 0 && !ret) + ret = err; + } + return ret; +} +EXPORT_SYMBOL(vfs_dq_quota_on_remount); + +static inline qsize_t qbtos(qsize_t blocks) +{ + return blocks << QIF_DQBLKSIZE_BITS; +} + +static inline qsize_t stoqb(qsize_t space) +{ + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; +} + +/* Generic routine for getting common part of quota structure */ +static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + + spin_lock(&dq_data_lock); + di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); + di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); + di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; + di->dqb_ihardlimit = dm->dqb_ihardlimit; + di->dqb_isoftlimit = dm->dqb_isoftlimit; + di->dqb_curinodes = dm->dqb_curinodes; + di->dqb_btime = dm->dqb_btime; + di->dqb_itime = dm->dqb_itime; + di->dqb_valid = QIF_ALL; + spin_unlock(&dq_data_lock); +} + +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot; + + dquot = dqget(sb, id, type); + if (dquot == NODQUOT) + return -ESRCH; + do_get_dqblk(dquot, di); + dqput(dquot); + + return 0; +} +EXPORT_SYMBOL(vfs_get_dqblk); + +/* Generic routine for setting common part of quota structure */ +static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + int check_blim = 0, check_ilim = 0; + struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + if ((di->dqb_valid & QIF_BLIMITS && + (di->dqb_bhardlimit > dqi->dqi_maxblimit || + di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || + (di->dqb_valid & QIF_ILIMITS && + (di->dqb_ihardlimit > dqi->dqi_maxilimit || + 
di->dqb_isoftlimit > dqi->dqi_maxilimit))) + return -ERANGE; + + spin_lock(&dq_data_lock); + if (di->dqb_valid & QIF_SPACE) { + dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_BLIMITS) { + dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); + dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_INODES) { + dm->dqb_curinodes = di->dqb_curinodes; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_ILIMITS) { + dm->dqb_isoftlimit = di->dqb_isoftlimit; + dm->dqb_ihardlimit = di->dqb_ihardlimit; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_BTIME) { + dm->dqb_btime = di->dqb_btime; + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_ITIME) { + dm->dqb_itime = di->dqb_itime; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + } + + if (check_blim) { + if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { + dm->dqb_btime = 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); + } + else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; + } + if (check_ilim) { + if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + dm->dqb_itime = 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); + } + else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_itime = get_seconds() + dqi->dqi_igrace; + } + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + clear_bit(DQ_FAKE_B, &dquot->dq_flags); + else + set_bit(DQ_FAKE_B, &dquot->dq_flags); + spin_unlock(&dq_data_lock); + mark_dquot_dirty(dquot); + + return 0; +} + +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot; + int rc; + + dquot = dqget(sb, id, type); + if (!dquot) { + rc = -ESRCH; + goto out; + } + rc = do_set_dqblk(dquot, di); + dqput(dquot); +out: + return rc; +} +EXPORT_SYMBOL(vfs_set_dqblk); + +/* Generic routine for getting common part of quota file information */ +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + if (!sb_has_quota_active(sb, type)) { + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return -ESRCH; + } + mi = sb_dqopt(sb)->info + type; + spin_lock(&dq_data_lock); + ii->dqi_bgrace = mi->dqi_bgrace; + ii->dqi_igrace = mi->dqi_igrace; + ii->dqi_flags = mi->dqi_flags & DQF_MASK; + ii->dqi_valid = IIF_ALL; + spin_unlock(&dq_data_lock); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return 0; +} +EXPORT_SYMBOL(vfs_get_dqinfo); + +/* Generic routine for setting common part of quota file information */ +int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi; + int err = 0; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + if (!sb_has_quota_active(sb, type)) { + err = -ESRCH; + goto out; + } + mi = sb_dqopt(sb)->info + type; + spin_lock(&dq_data_lock); + if (ii->dqi_valid & IIF_BGRACE) + mi->dqi_bgrace = ii->dqi_bgrace; + if (ii->dqi_valid & IIF_IGRACE) + mi->dqi_igrace = ii->dqi_igrace; + if (ii->dqi_valid & IIF_FLAGS) + mi->dqi_flags = 
(mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + spin_unlock(&dq_data_lock); + mark_info_dirty(sb, type); + /* Force write to disk */ + sb->dq_op->write_info(sb, type); +out: + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return err; +} +EXPORT_SYMBOL(vfs_set_dqinfo); + +struct quotactl_ops vfs_quotactl_ops = { + .quota_on = vfs_quota_on, + .quota_off = vfs_quota_off, + .quota_sync = vfs_quota_sync, + .get_info = vfs_get_dqinfo, + .set_info = vfs_set_dqinfo, + .get_dqblk = vfs_get_dqblk, + .set_dqblk = vfs_set_dqblk +}; + +static ctl_table fs_dqstats_table[] = { + { + .ctl_name = FS_DQ_LOOKUPS, + .procname = "lookups", + .data = &dqstats.lookups, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_DROPS, + .procname = "drops", + .data = &dqstats.drops, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_READS, + .procname = "reads", + .data = &dqstats.reads, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_WRITES, + .procname = "writes", + .data = &dqstats.writes, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_CACHE_HITS, + .procname = "cache_hits", + .data = &dqstats.cache_hits, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_ALLOCATED, + .procname = "allocated_dquots", + .data = &dqstats.allocated_dquots, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_FREE, + .procname = "free_dquots", + .data = &dqstats.free_dquots, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_SYNCS, + .procname = "syncs", + .data = &dqstats.syncs, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, +#ifdef CONFIG_PRINT_QUOTA_WARNING + { + .ctl_name = FS_DQ_WARNINGS, + .procname = "warnings", + .data = &flag_print_warnings, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif + { .ctl_name = 0 }, +}; + +static ctl_table fs_table[] = { + { + .ctl_name = FS_DQSTATS, + .procname = "quota", + .mode = 0555, + .child = fs_dqstats_table, + }, + { .ctl_name = 0 }, +}; + +static ctl_table sys_table[] = { + { + .ctl_name = CTL_FS, + .procname = "fs", + .mode = 0555, + .child = fs_table, + }, + { .ctl_name = 0 }, +}; + +static int __init dquot_init(void) +{ + int i; + unsigned long nr_hash, order; + + printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); + + register_sysctl_table(sys_table); + + dquot_cachep = kmem_cache_create("dquot", + sizeof(struct dquot), sizeof(unsigned long) * 4, + (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD|SLAB_PANIC), + NULL); + + order = 0; + dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); + if (!dquot_hash) + panic("Cannot create dquot hash table"); + + /* Find power-of-two hlist_heads which can fit into allocation */ + nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); + dq_hash_bits = 0; + do { + dq_hash_bits++; + } while (nr_hash >> dq_hash_bits); + dq_hash_bits--; + + nr_hash = 1UL << dq_hash_bits; + dq_hash_mask = nr_hash - 1; + for (i = 0; i < nr_hash; i++) + INIT_HLIST_HEAD(dquot_hash + i); + + printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", + nr_hash, order, (PAGE_SIZE << order)); + + register_shrinker(&dqcache_shrinker); + +#ifdef 
CONFIG_QUOTA_NETLINK_INTERFACE + if (genl_register_family(&quota_genl_family) != 0) + printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); +#endif + + return 0; +} +module_init(dquot_init); diff --git a/fs/quota/quota.c b/fs/quota/quota.c new file mode 100644 index 00000000000..d76ada914f9 --- /dev/null +++ b/fs/quota/quota.c @@ -0,0 +1,513 @@ +/* + * Quota code necessary even when VFS quota support is not compiled + * into the kernel. The interesting stuff is over in dquot.c, here + * we have symbols for initial quotactl(2) handling, the sysctl(2) + * variables, etc - things needed even when quota support disabled. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Check validity of generic quotactl commands */ +static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return -EINVAL; + if (!sb && cmd != Q_SYNC) + return -ENODEV; + /* Is operation supported? */ + if (sb && !sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_GETFMT: + break; + case Q_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_SYNC: + if (sb && !sb->s_qcop->quota_sync) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? */ + switch (cmd) { + case Q_GETFMT: + case Q_GETINFO: + case Q_SETINFO: + case Q_SETQUOTA: + case Q_GETQUOTA: + /* This is just informative test so we are satisfied without a lock */ + if (!sb_has_quota_active(sb, type)) + return -ESRCH; + } + + /* Check privileges */ + if (cmd == Q_GETQUOTA) { + if (((type == USRQUOTA && current_euid() != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return 0; +} + +/* Check validity of XFS Quota Manager commands */ +static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= XQM_MAXQUOTAS) + return -EINVAL; + if (!sb) + return -ENODEV; + if (!sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: + if (!sb->s_qcop->set_xstate) + return -ENOSYS; + break; + case Q_XGETQSTAT: + if (!sb->s_qcop->get_xstate) + return -ENOSYS; + break; + case Q_XSETQLIM: + if (!sb->s_qcop->set_xquota) + return -ENOSYS; + break; + case Q_XGETQUOTA: + if (!sb->s_qcop->get_xquota) + return -ENOSYS; + break; + case Q_XQUOTASYNC: + if (!sb->s_qcop->quota_sync) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Check privileges */ + if (cmd == Q_XGETQUOTA) { + if (((type == XQM_USRQUOTA && current_euid() != id) || + (type == XQM_GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + + return 0; +} + +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + int error; + + if (XQM_COMMAND(cmd)) + error = xqm_quotactl_valid(sb, type, cmd, 
id); + else + error = generic_quotactl_valid(sb, type, cmd, id); + if (!error) + error = security_quotactl(cmd, type, id, sb); + return error; +} + +static void quota_sync_sb(struct super_block *sb, int type) +{ + int cnt; + + sb->s_qcop->quota_sync(sb, type); + + if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) + return; + /* This is not very clever (and fast) but currently I don't know about + * any other simple way of getting quota data to disk and we must get + * them there for userspace to be visible... */ + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, 1); + sync_blockdev(sb->s_bdev); + + /* + * Now when everything is written we can discard the pagecache so + * that userspace sees the changes. + */ + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); + truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); + mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); + } + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); +} + +void sync_dquots(struct super_block *sb, int type) +{ + int cnt; + + if (sb) { + if (sb->s_qcop->quota_sync) + quota_sync_sb(sb, type); + return; + } + + spin_lock(&sb_lock); +restart: + list_for_each_entry(sb, &super_blocks, s_list) { + /* This test just improves performance so it needn't be reliable... */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && type != cnt) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && + list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) + continue; + break; + } + if (cnt == MAXQUOTAS) + continue; + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (sb->s_root && sb->s_qcop->quota_sync) + quota_sync_sb(sb, type); + up_read(&sb->s_umount); + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto restart; + } + spin_unlock(&sb_lock); +} + +/* Copy parameters and call proper function */ +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) +{ + int ret; + + switch (cmd) { + case Q_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); + ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); + putname(pathname); + return ret; + } + case Q_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type, 0); + + case Q_GETFMT: { + __u32 fmt; + + down_read(&sb_dqopt(sb)->dqptr_sem); + if (!sb_has_quota_active(sb, type)) { + up_read(&sb_dqopt(sb)->dqptr_sem); + return -ESRCH; + } + fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; + up_read(&sb_dqopt(sb)->dqptr_sem); + if (copy_to_user(addr, &fmt, sizeof(fmt))) + return -EFAULT; + return 0; + } + case Q_GETINFO: { + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_SETINFO: { + struct if_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + return sb->s_qcop->set_info(sb, type, &info); + } + case Q_GETQUOTA: { + struct if_dqblk idq; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + return ret; + if (copy_to_user(addr, &idq, sizeof(idq))) + return -EFAULT; + return 0; + } + case Q_SETQUOTA: { + struct if_dqblk idq; + + if (copy_from_user(&idq, addr, sizeof(idq))) + return -EFAULT; + return sb->s_qcop->set_dqblk(sb, type, id, &idq); + } + case Q_SYNC: + 
sync_dquots(sb, type); + return 0; + + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: { + __u32 flags; + + if (copy_from_user(&flags, addr, sizeof(flags))) + return -EFAULT; + return sb->s_qcop->set_xstate(sb, flags, cmd); + } + case Q_XGETQSTAT: { + struct fs_quota_stat fqs; + + if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) + return ret; + if (copy_to_user(addr, &fqs, sizeof(fqs))) + return -EFAULT; + return 0; + } + case Q_XSETQLIM: { + struct fs_disk_quota fdq; + + if (copy_from_user(&fdq, addr, sizeof(fdq))) + return -EFAULT; + return sb->s_qcop->set_xquota(sb, type, id, &fdq); + } + case Q_XGETQUOTA: { + struct fs_disk_quota fdq; + + if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + return ret; + if (copy_to_user(addr, &fdq, sizeof(fdq))) + return -EFAULT; + return 0; + } + case Q_XQUOTASYNC: + return sb->s_qcop->quota_sync(sb, type); + /* We never reach here unless validity check is broken */ + default: + BUG(); + } + return 0; +} + +/* + * look up a superblock on which quota ops will be performed + * - use the name of a block device to find the superblock thereon + */ +static inline struct super_block *quotactl_block(const char __user *special) +{ +#ifdef CONFIG_BLOCK + struct block_device *bdev; + struct super_block *sb; + char *tmp = getname(special); + + if (IS_ERR(tmp)) + return ERR_CAST(tmp); + bdev = lookup_bdev(tmp); + putname(tmp); + if (IS_ERR(bdev)) + return ERR_CAST(bdev); + sb = get_super(bdev); + bdput(bdev); + if (!sb) + return ERR_PTR(-ENODEV); + + return sb; +#else + return ERR_PTR(-ENODEV); +#endif +} + +/* + * This is the system call interface. This communicates with + * the user-level programs. Currently this only supports diskquota + * calls. Maybe we need to add the process quotas etc. in the future, + * but we probably should use rlimits for that. + */ +SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, + qid_t, id, void __user *, addr) +{ + uint cmds, type; + struct super_block *sb = NULL; + int ret; + + cmds = cmd >> SUBCMDSHIFT; + type = cmd & SUBCMDMASK; + + if (cmds != Q_SYNC || special) { + sb = quotactl_block(special); + if (IS_ERR(sb)) + return PTR_ERR(sb); + } + + ret = check_quotactl_valid(sb, type, cmds, id); + if (ret >= 0) + ret = do_quotactl(sb, type, cmds, id, addr); + if (sb) + drop_super(sb); + + return ret; +} + +#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) +/* + * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) + * and is necessary due to alignment problems. 
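+ * (u64 fields are only 4-byte aligned in the 32-bit ABI, so the structure layouts differ from the native 64-bit ones)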
+ */ +struct compat_if_dqblk { + compat_u64 dqb_bhardlimit; + compat_u64 dqb_bsoftlimit; + compat_u64 dqb_curspace; + compat_u64 dqb_ihardlimit; + compat_u64 dqb_isoftlimit; + compat_u64 dqb_curinodes; + compat_u64 dqb_btime; + compat_u64 dqb_itime; + compat_uint_t dqb_valid; +}; + +/* XFS structures */ +struct compat_fs_qfilestat { + compat_u64 dqb_bhardlimit; + compat_u64 qfs_nblks; + compat_uint_t qfs_nextents; +}; + +struct compat_fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + struct compat_fs_qfilestat qs_uquota; + struct compat_fs_qfilestat qs_gquota; + compat_uint_t qs_incoredqs; + compat_int_t qs_btimelimit; + compat_int_t qs_itimelimit; + compat_int_t qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr) +{ + unsigned int cmds; + struct if_dqblk __user *dqblk; + struct compat_if_dqblk __user *compat_dqblk; + struct fs_quota_stat __user *fsqstat; + struct compat_fs_quota_stat __user *compat_fsqstat; + compat_uint_t data; + u16 xdata; + long ret; + + cmds = cmd >> SUBCMDSHIFT; + + switch (cmds) { + case Q_GETQUOTA: + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); + compat_dqblk = addr; + ret = sys_quotactl(cmd, special, id, dqblk); + if (ret) + break; + if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || + get_user(data, &dqblk->dqb_valid) || + put_user(data, &compat_dqblk->dqb_valid)) + ret = -EFAULT; + break; + case Q_SETQUOTA: + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); + compat_dqblk = addr; + ret = -EFAULT; + if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || + get_user(data, &compat_dqblk->dqb_valid) || + put_user(data, &dqblk->dqb_valid)) + break; + ret = sys_quotactl(cmd, special, id, dqblk); + break; + case Q_XGETQSTAT: + fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); + compat_fsqstat = addr; + ret = sys_quotactl(cmd, special, id, fsqstat); + if (ret) + break; + ret = -EFAULT; + /* Copying qs_version, qs_flags, qs_pad */ + if (copy_in_user(compat_fsqstat, fsqstat, + offsetof(struct compat_fs_quota_stat, qs_uquota))) + break; + /* Copying qs_uquota */ + if (copy_in_user(&compat_fsqstat->qs_uquota, + &fsqstat->qs_uquota, + sizeof(compat_fsqstat->qs_uquota)) || + get_user(data, &fsqstat->qs_uquota.qfs_nextents) || + put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) + break; + /* Copying qs_gquota */ + if (copy_in_user(&compat_fsqstat->qs_gquota, + &fsqstat->qs_gquota, + sizeof(compat_fsqstat->qs_gquota)) || + get_user(data, &fsqstat->qs_gquota.qfs_nextents) || + put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) + break; + /* Copying the rest */ + if (copy_in_user(&compat_fsqstat->qs_incoredqs, + &fsqstat->qs_incoredqs, + sizeof(struct compat_fs_quota_stat) - + offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || + get_user(xdata, &fsqstat->qs_iwarnlimit) || + put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) + break; + ret = 0; + break; + default: + ret = sys_quotactl(cmd, special, id, addr); + } + return ret; +} +#endif diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c new file mode 100644 index 00000000000..953404c95b1 --- /dev/null +++ b/fs/quota/quota_tree.c @@ -0,0 +1,645 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quota_tree.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Quota trie support"); 
+MODULE_LICENSE("GPL"); + +#define __QUOTA_QT_PARANOIA + +typedef char *dqbuf_t; + +static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + + depth = info->dqi_qtree_depth - depth - 1; + while (depth--) + id /= epb; + return id % epb; +} + +/* Number of entries in one blocks */ +static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) +{ + return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) + / info->dqi_entry_size; +} + +static dqbuf_t getdqbuf(size_t size) +{ + dqbuf_t buf = kmalloc(size, GFP_NOFS); + if (!buf) + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + return buf; +} + +static inline void freedqbuf(dqbuf_t buf) +{ + kfree(buf); +} + +static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + memset(buf, 0, info->dqi_usable_bs); + return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +/* Remove empty block from list and return it */ +static int get_free_dqblk(struct qtree_mem_dqinfo *info) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int ret, blk; + + if (!buf) + return -ENOMEM; + if (info->dqi_free_blk) { + blk = info->dqi_free_blk; + ret = read_blk(info, blk, buf); + if (ret < 0) + goto out_buf; + info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); + } + else { + memset(buf, 0, info->dqi_usable_bs); + /* Assure block allocation... 
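+ * by writing a zeroed block at the current end of the quota file before dqi_blocks is bumped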
*/ + ret = write_blk(info, info->dqi_blocks, buf); + if (ret < 0) + goto out_buf; + blk = info->dqi_blocks++; + } + mark_info_dirty(info->dqi_sb, info->dqi_type); + ret = blk; +out_buf: + freedqbuf(buf); + return ret; +} + +/* Insert empty block to the list */ +static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); + dh->dqdh_prev_free = cpu_to_le32(0); + dh->dqdh_entries = cpu_to_le16(0); + err = write_blk(info, blk, buf); + if (err < 0) + return err; + info->dqi_free_blk = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +} + +/* Remove given block from the list of blocks with free entries */ +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + uint nextblk = le32_to_cpu(dh->dqdh_next_free); + uint prevblk = le32_to_cpu(dh->dqdh_prev_free); + int err; + + if (!tmpbuf) + return -ENOMEM; + if (nextblk) { + err = read_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + dh->dqdh_prev_free; + err = write_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + } + if (prevblk) { + err = read_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = + dh->dqdh_next_free; + err = write_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + } else { + info->dqi_free_entry = nextblk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + } + freedqbuf(tmpbuf); + dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); + /* No matter whether write succeeds block is out of list */ + if (write_blk(info, blk, buf) < 0) + printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Insert given block to the beginning of list with free entries */ +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + if (!tmpbuf) + return -ENOMEM; + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); + dh->dqdh_prev_free = cpu_to_le32(0); + err = write_blk(info, blk, buf); + if (err < 0) + goto out_buf; + if (info->dqi_free_entry) { + err = read_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + cpu_to_le32(blk); + err = write_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + } + freedqbuf(tmpbuf); + info->dqi_free_entry = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Is the entry in the block free? 
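+ * (i.e. every byte of the on-disk entry is zero)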
*/ +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) +{ + int i; + + for (i = 0; i < info->dqi_entry_size; i++) + if (disk[i]) + return 0; + return 1; +} +EXPORT_SYMBOL(qtree_entry_unused); + +/* Find space for dquot */ +static uint find_free_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, int *err) +{ + uint blk, i; + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *ddquot; + + *err = 0; + if (!buf) { + *err = -ENOMEM; + return 0; + } + dh = (struct qt_disk_dqdbheader *)buf; + if (info->dqi_free_entry) { + blk = info->dqi_free_entry; + *err = read_blk(info, blk, buf); + if (*err < 0) + goto out_buf; + } else { + blk = get_free_dqblk(info); + if ((int)blk < 0) { + *err = blk; + freedqbuf(buf); + return 0; + } + memset(buf, 0, info->dqi_usable_bs); + /* This is enough as block is already zeroed and entry list is empty... */ + info->dqi_free_entry = blk; + mark_info_dirty(dquot->dq_sb, dquot->dq_type); + } + /* Block will be full? */ + if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { + *err = remove_free_dqentry(info, buf, blk); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't " + "remove block (%u) from entry free list.\n", + blk); + goto out_buf; + } + } + le16_add_cpu(&dh->dqdh_entries, 1); + /* Find free structure in block */ + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); + i++, ddquot += info->dqi_entry_size); +#ifdef __QUOTA_QT_PARANOIA + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " + "but it shouldn't.\n"); + *err = -EIO; + goto out_buf; + } +#endif + *err = write_blk(info, blk, buf); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " + "data block %u.\n", blk); + goto out_buf; + } + dquot->dq_off = (blk << info->dqi_blocksize_bits) + + sizeof(struct qt_disk_dqdbheader) + + i * info->dqi_entry_size; + freedqbuf(buf); + return blk; +out_buf: + freedqbuf(buf); + return 0; +} + +/* Insert reference to structure into the trie */ +static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *treeblk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0, newson = 0, newact = 0; + __le32 *ref; + uint newblk; + + if (!buf) + return -ENOMEM; + if (!*treeblk) { + ret = get_free_dqblk(info); + if (ret < 0) + goto out_buf; + *treeblk = ret; + memset(buf, 0, info->dqi_usable_bs); + newact = 1; + } else { + ret = read_blk(info, *treeblk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read tree quota block " + "%u.\n", *treeblk); + goto out_buf; + } + } + ref = (__le32 *)buf; + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!newblk) + newson = 1; + if (depth == info->dqi_qtree_depth - 1) { +#ifdef __QUOTA_QT_PARANOIA + if (newblk) { + printk(KERN_ERR "VFS: Inserting already present quota " + "entry (block %u).\n", + le32_to_cpu(ref[get_index(info, + dquot->dq_id, depth)])); + ret = -EIO; + goto out_buf; + } +#endif + newblk = find_free_dqentry(info, dquot, &ret); + } else { + ret = do_insert_tree(info, dquot, &newblk, depth+1); + } + if (newson && ret >= 0) { + ref[get_index(info, dquot->dq_id, depth)] = + cpu_to_le32(newblk); + ret = write_blk(info, *treeblk, buf); + } else if (newact && ret < 0) { + put_free_dqblk(info, buf, *treeblk); + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Wrapper for inserting quota structure into tree 
*/ +static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + int tmp = QT_TREEOFF; + return do_insert_tree(info, dquot, &tmp, 0); +} + +/* + * We don't have to be afraid of deadlocks as we never have quotas on quota files... + */ +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + ssize_t ret; + dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); + + if (!ddquot) + return -ENOMEM; + + /* dq_off is guarded by dqio_mutex */ + if (!dquot->dq_off) { + ret = dq_insert_tree(info, dquot); + if (ret < 0) { + printk(KERN_ERR "VFS: Error %zd occurred while " + "creating quota.\n", ret); + freedqbuf(ddquot); + return ret; + } + } + spin_lock(&dq_data_lock); + info->dqi_ops->mem2disk_dqblk(ddquot, dquot); + spin_unlock(&dq_data_lock); + ret = sb->s_op->quota_write(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + sb->s_id); + if (ret >= 0) + ret = -ENOSPC; + } else { + ret = 0; + } + dqstats.writes++; + freedqbuf(ddquot); + + return ret; +} +EXPORT_SYMBOL(qtree_write_dquot); + +/* Free dquot entry in data block */ +static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint blk) +{ + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + + if (!buf) + return -ENOMEM; + if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { + printk(KERN_ERR "VFS: Quota structure has offset to other " + "block (%u) than it should (%u).\n", blk, + (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); + goto out_buf; + } + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); + goto out_buf; + } + dh = (struct qt_disk_dqdbheader *)buf; + le16_add_cpu(&dh->dqdh_entries, -1); + if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? 
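+ * Remove it from the free-entry list and give the whole block back.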
*/ + ret = remove_free_dqentry(info, buf, blk); + if (ret >= 0) + ret = put_free_dqblk(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't move quota data block (%u) " + "to free list.\n", blk); + goto out_buf; + } + } else { + memset(buf + + (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), + 0, info->dqi_entry_size); + if (le16_to_cpu(dh->dqdh_entries) == + qtree_dqstr_in_blk(info) - 1) { + /* Insert will write block itself */ + ret = insert_free_dqentry(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't insert quota data " + "block (%u) to free entry list.\n", blk); + goto out_buf; + } + } else { + ret = write_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't write quota data " + "block %u\n", blk); + goto out_buf; + } + } + } + dquot->dq_off = 0; /* Quota is now unattached */ +out_buf: + freedqbuf(buf); + return ret; +} + +/* Remove reference to dquot from tree */ +static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + uint newblk; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, *blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); + goto out_buf; + } + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (depth == info->dqi_qtree_depth - 1) { + ret = free_dqentry(info, dquot, newblk); + newblk = 0; + } else { + ret = remove_tree(info, dquot, &newblk, depth+1); + } + if (ret >= 0 && !newblk) { + int i; + ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); + /* Block got empty? */ + for (i = 0; + i < (info->dqi_usable_bs >> 2) && !ref[i]; + i++); + /* Don't put the root block into the free block list */ + if (i == (info->dqi_usable_bs >> 2) + && *blk != QT_TREEOFF) { + put_free_dqblk(info, buf, *blk); + *blk = 0; + } else { + ret = write_blk(info, *blk, buf); + if (ret < 0) + printk(KERN_ERR "VFS: Can't write quota tree " + "block %u.\n", *blk); + } + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Delete dquot from tree */ +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + uint tmp = QT_TREEOFF; + + if (!dquot->dq_off) /* Even not allocated? 
*/ + return 0; + return remove_tree(info, dquot, &tmp, 0); +} +EXPORT_SYMBOL(qtree_delete_dquot); + +/* Find entry in block */ +static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + int i; + char *ddquot; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); + i++, ddquot += info->dqi_entry_size); + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: Quota for id %u referenced " + "but not present.\n", dquot->dq_id); + ret = -EIO; + goto out_buf; + } else { + ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + qt_disk_dqdbheader) + i * info->dqi_entry_size; + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree */ +static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + ret = 0; + blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!blk) /* No reference? */ + goto out_buf; + if (depth < info->dqi_qtree_depth - 1) + ret = find_tree_dqentry(info, dquot, blk, depth+1); + else + ret = find_block_dqentry(info, dquot, blk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree - wrapper function */ +static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); +} + +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + loff_t offset; + dqbuf_t ddquot; + int ret = 0; + +#ifdef __QUOTA_QT_PARANOIA + /* Invalidated quota? */ + if (!sb_dqopt(dquot->dq_sb)->files[type]) { + printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); + return -EIO; + } +#endif + /* Do we know offset of the dquot entry in the quota file? */ + if (!dquot->dq_off) { + offset = find_dqentry(info, dquot); + if (offset <= 0) { /* Entry not present? 
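+ * Treat the dquot as a fake entry with zeroed usage.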
*/ + if (offset < 0) + printk(KERN_ERR "VFS: Can't read quota " + "structure for id %u.\n", dquot->dq_id); + dquot->dq_off = 0; + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + ret = offset; + goto out; + } + dquot->dq_off = offset; + } + ddquot = getdqbuf(info->dqi_entry_size); + if (!ddquot) + return -ENOMEM; + ret = sb->s_op->quota_read(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + if (ret >= 0) + ret = -EIO; + printk(KERN_ERR "VFS: Error while reading quota " + "structure for id %u.\n", dquot->dq_id); + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + freedqbuf(ddquot); + goto out; + } + spin_lock(&dq_data_lock); + info->dqi_ops->disk2mem_dqblk(dquot, ddquot); + if (!dquot->dq_dqb.dqb_bhardlimit && + !dquot->dq_dqb.dqb_bsoftlimit && + !dquot->dq_dqb.dqb_ihardlimit && + !dquot->dq_dqb.dqb_isoftlimit) + set_bit(DQ_FAKE_B, &dquot->dq_flags); + spin_unlock(&dq_data_lock); + freedqbuf(ddquot); +out: + dqstats.reads++; + return ret; +} +EXPORT_SYMBOL(qtree_read_dquot); + +/* Check whether dquot should not be deleted. We know we are + * the only one operating on dquot (thanks to dq_lock) */ +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + return qtree_delete_dquot(info, dquot); + return 0; +} +EXPORT_SYMBOL(qtree_release_dquot); diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h new file mode 100644 index 00000000000..a1ab8db81a5 --- /dev/null +++ b/fs/quota/quota_tree.h @@ -0,0 +1,25 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTA_TREE_H +#define _LINUX_QUOTA_TREE_H + +#include +#include + +/* + * Structure of header of block with quota structures. 
It is padded to 16 bytes so + * there will be space for exactly 21 quota-entries in a block + */ +struct qt_disk_dqdbheader { + __le32 dqdh_next_free; /* Number of next block with free entry */ + __le32 dqdh_prev_free; /* Number of previous block with free entry */ + __le16 dqdh_entries; /* Number of valid entries in block */ + __le16 dqdh_pad1; + __le32 dqdh_pad2; +}; + +#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ + +#endif /* _LINUX_QUOTAIO_TREE_H */ diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c new file mode 100644 index 00000000000..b4af1c69ad1 --- /dev/null +++ b/fs/quota/quota_v1.c @@ -0,0 +1,218 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quotaio_v1.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Old quota format support"); +MODULE_LICENSE("GPL"); + +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v1_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v1_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + +static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) +{ + m->dqb_ihardlimit = d->dqb_ihardlimit; + m->dqb_isoftlimit = d->dqb_isoftlimit; + m->dqb_curinodes = d->dqb_curinodes; + m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); + m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); + m->dqb_curspace = v1_qbtos(d->dqb_curblocks); + m->dqb_itime = d->dqb_itime; + m->dqb_btime = d->dqb_btime; +} + +static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) +{ + d->dqb_ihardlimit = m->dqb_ihardlimit; + d->dqb_isoftlimit = m->dqb_isoftlimit; + d->dqb_curinodes = m->dqb_curinodes; + d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); + d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); + d->dqb_curblocks = v1_stoqb(m->dqb_curspace); + d->dqb_itime = m->dqb_itime; + d->dqb_btime = m->dqb_btime; +} + +static int v1_read_dqblk(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct v1_disk_dqblk dqblk; + + if (!sb_dqopt(dquot->dq_sb)->files[type]) + return -EINVAL; + + /* Set structure to 0s in case read fails/is after end of file */ + memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); + dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + + v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); + if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + set_bit(DQ_FAKE_B, &dquot->dq_flags); + dqstats.reads++; + + return 0; +} + +static int v1_commit_dqblk(struct dquot *dquot) +{ + short type = dquot->dq_type; + ssize_t ret; + struct v1_disk_dqblk dqblk; + + v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); + if (dquot->dq_id == 0) { + dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + } + ret = 0; + if (sb_dqopt(dquot->dq_sb)->files[type]) + ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + if (ret != sizeof(struct v1_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + dquot->dq_sb->s_id); + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + +out: + dqstats.writes++; + + return ret; +} + +/* Magics of new quota format */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* 
GRPQUOTA */\ +} + +/* Header of new quota format */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + +static int v1_check_quota_file(struct super_block *sb, int type) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + ulong blocks; + size_t off; + struct v2_disk_dqheader dqhead; + ssize_t size; + loff_t isize; + static const uint quota_magics[] = V2_INITQMAGICS; + + isize = i_size_read(inode); + if (!isize) + return 0; + blocks = isize >> BLOCK_SIZE_BITS; + off = isize & (BLOCK_SIZE - 1); + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + return 0; + /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + if (size != sizeof(struct v2_disk_dqheader)) + return 1; /* Probably not new format */ + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) + return 1; /* Definitely not new format */ + printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); + return 0; /* Seems like a new format file -> refuse it */ +} + +static int v1_read_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct v1_disk_dqblk dqblk; + int ret; + + if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + /* limits are stored as unsigned 32-bit data */ + dqopt->info[type].dqi_maxblimit = 0xffffffff; + dqopt->info[type].dqi_maxilimit = 0xffffffff; + dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; +out: + return ret; +} + +static int v1_write_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct v1_disk_dqblk dqblk; + int ret; + + dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; + if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + dqblk.dqb_itime = dqopt->info[type].dqi_igrace; + dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; + ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret == sizeof(struct v1_disk_dqblk)) + ret = 0; + else if (ret > 0) + ret = -EIO; +out: + return ret; +} + +static struct quota_format_ops v1_format_ops = { + .check_quota_file = v1_check_quota_file, + .read_file_info = v1_read_file_info, + .write_file_info = v1_write_file_info, + .free_file_info = NULL, + .read_dqblk = v1_read_dqblk, + .commit_dqblk = v1_commit_dqblk, +}; + +static struct quota_format_type v1_quota_format = { + .qf_fmt_id = QFMT_VFS_OLD, + .qf_ops = &v1_format_ops, + .qf_owner = THIS_MODULE +}; + +static int __init init_v1_quota_format(void) +{ + return register_quota_format(&v1_quota_format); +} + +static void __exit exit_v1_quota_format(void) +{ + unregister_quota_format(&v1_quota_format); +} + +module_init(init_v1_quota_format); +module_exit(exit_v1_quota_format); + diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c new file mode 100644 index 00000000000..b618b563635 --- /dev/null +++ b/fs/quota/quota_v2.c @@ -0,0 +1,236 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quota_tree.h" +#include "quotaio_v2.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Quota format v2 support"); +MODULE_LICENSE("GPL"); + +#define __QUOTA_V2_PARANOIA + +static void v2_mem2diskdqb(void *dp, struct dquot *dquot); +static void v2_disk2memdqb(struct dquot *dquot, void *dp); +static int v2_is_id(void *dp, struct dquot *dquot); + +static struct qtree_fmt_operations v2_qtree_ops = { + .mem2disk_dqblk = v2_mem2diskdqb, + .disk2mem_dqblk = v2_disk2memdqb, + .is_id = v2_is_id, +}; + +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v2_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v2_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + +/* Check whether given file is really vfsv0 quotafile */ +static int v2_check_quota_file(struct super_block *sb, int type) +{ + struct v2_disk_dqheader dqhead; + ssize_t size; + static const uint quota_magics[] = V2_INITQMAGICS; + static const uint quota_versions[] = V2_INITQVERSIONS; + + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + if (size != sizeof(struct v2_disk_dqheader)) { + printk("quota_v2: failed read expected=%zd got=%zd\n", + sizeof(struct v2_disk_dqheader), size); + return 0; + } + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || + le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) + return 0; + return 1; +} + +/* Read information header from quota file */ +static int v2_read_file_info(struct super_block *sb, int type) +{ + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo; + ssize_t size; + + size = sb->s_op->quota_read(sb, type, (char 
*)&dinfo, + sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't read info structure on device %s.\n", + sb->s_id); + return -1; + } + info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); + if (!info->dqi_priv) { + printk(KERN_WARNING + "Not enough memory for quota information structure.\n"); + return -1; + } + qinfo = info->dqi_priv; + /* limits are stored as unsigned 32-bit data */ + info->dqi_maxblimit = 0xffffffff; + info->dqi_maxilimit = 0xffffffff; + info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); + info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); + info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); + qinfo->dqi_sb = sb; + qinfo->dqi_type = type; + qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; + qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; + qinfo->dqi_qtree_depth = qtree_depth(qinfo); + qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); + qinfo->dqi_ops = &v2_qtree_ops; + return 0; +} + +/* Write information header to quota file */ +static int v2_write_file_info(struct super_block *sb, int type) +{ + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo = info->dqi_priv; + ssize_t size; + + spin_lock(&dq_data_lock); + info->dqi_flags &= ~DQF_INFO_DIRTY; + dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); + dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); + dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); + spin_unlock(&dq_data_lock); + dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); + size = sb->s_op->quota_write(sb, type, (char *)&dinfo, + sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't write info structure on device %s.\n", + sb->s_id); + return -1; + } + return 0; +} + +static void v2_disk2memdqb(struct dquot *dquot, void *dp) +{ + struct v2_disk_dqblk *d = dp, empty; + struct mem_dqblk *m = &dquot->dq_dqb; + + m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); + m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); + m->dqb_itime = le64_to_cpu(d->dqb_itime); + m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); + m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + m->dqb_btime = le64_to_cpu(d->dqb_btime); + /* We need to escape back all-zero structure */ + memset(&empty, 0, sizeof(struct v2_disk_dqblk)); + empty.dqb_itime = cpu_to_le64(1); + if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) + m->dqb_itime = 0; +} + +static void v2_mem2diskdqb(void *dp, struct dquot *dquot) +{ + struct v2_disk_dqblk *d = dp; + struct mem_dqblk *m = &dquot->dq_dqb; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); + d->dqb_itime = cpu_to_le64(m->dqb_itime); + d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); + d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = 
cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(dquot->dq_id); + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); +} + +static int v2_is_id(void *dp, struct dquot *dquot) +{ + struct v2_disk_dqblk *d = dp; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + if (qtree_entry_unused(info, dp)) + return 0; + return le32_to_cpu(d->dqb_id) == dquot->dq_id; +} + +static int v2_read_dquot(struct dquot *dquot) +{ + return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_write_dquot(struct dquot *dquot) +{ + return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_release_dquot(struct dquot *dquot) +{ + return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_free_file_info(struct super_block *sb, int type) +{ + kfree(sb_dqinfo(sb, type)->dqi_priv); + return 0; +} + +static struct quota_format_ops v2_format_ops = { + .check_quota_file = v2_check_quota_file, + .read_file_info = v2_read_file_info, + .write_file_info = v2_write_file_info, + .free_file_info = v2_free_file_info, + .read_dqblk = v2_read_dquot, + .commit_dqblk = v2_write_dquot, + .release_dqblk = v2_release_dquot, +}; + +static struct quota_format_type v2_quota_format = { + .qf_fmt_id = QFMT_VFS_V0, + .qf_ops = &v2_format_ops, + .qf_owner = THIS_MODULE +}; + +static int __init init_v2_quota_format(void) +{ + return register_quota_format(&v2_quota_format); +} + +static void __exit exit_v2_quota_format(void) +{ + unregister_quota_format(&v2_quota_format); +} + +module_init(init_v2_quota_format); +module_exit(exit_v2_quota_format); diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h new file mode 100644 index 00000000000..746654b5de7 --- /dev/null +++ b/fs/quota/quotaio_v1.h @@ -0,0 +1,33 @@ +#ifndef _LINUX_QUOTAIO_V1_H +#define _LINUX_QUOTAIO_V1_H + +#include + +/* + * The following constants define the amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). The timer is started when the user crosses + * their soft limit, it is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ +#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. 
+ */ +struct v1_disk_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) + +#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h new file mode 100644 index 00000000000..530fe580685 --- /dev/null +++ b/fs/quota/quotaio_v2.h @@ -0,0 +1,60 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTAIO_V2_H +#define _LINUX_QUOTAIO_V2_H + +#include +#include + +/* + * Definitions of magics and versions of current quota files + */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +#define V2_INITQVERSIONS {\ + 0, /* USRQUOTA */\ + 0 /* GRPQUOTA */\ +} + +/* First generic header */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is a radix tree whose leaves point + * to blocks of these structures. + */ +struct v2_disk_dqblk { + __le32 dqb_id; /* id this quota applies to */ + __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __le32 dqb_isoftlimit; /* preferred inode limit */ + __le32 dqb_curinodes; /* current # allocated inodes */ + __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __le64 dqb_curspace; /* current space occupied (in bytes) */ + __le64 dqb_btime; /* time limit for excessive disk use */ + __le64 dqb_itime; /* time limit for excessive inode use */ +}; + +/* Header with type and version specific information */ +struct v2_disk_dqinfo { + __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ + __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ + __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ + __le32 dqi_blocks; /* Number of blocks in file */ + __le32 dqi_free_blk; /* Number of first free block in the list */ + __le32 dqi_free_entry; /* Number of block with at least one free entry */ +}; + +#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ +#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ + +#endif /* _LINUX_QUOTAIO_V2_H */ diff --git a/fs/quota_tree.c b/fs/quota_tree.c deleted file mode 100644 index 953404c95b1..00000000000 --- a/fs/quota_tree.c +++ /dev/null @@ -1,645 +0,0 @@ -/* - * vfsv0 quota IO operations on file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quota_tree.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Quota trie support"); -MODULE_LICENSE("GPL"); - -#define __QUOTA_QT_PARANOIA - -typedef char *dqbuf_t; - -static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) -{ - unsigned int epb = info->dqi_usable_bs >> 2; - - depth = info->dqi_qtree_depth - depth - 1; - while (depth--) - id /= epb; - return id % epb; -} - -/* Number of entries in one blocks */ -static 
inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) -{ - return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) - / info->dqi_entry_size; -} - -static dqbuf_t getdqbuf(size_t size) -{ - dqbuf_t buf = kmalloc(size, GFP_NOFS); - if (!buf) - printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); - return buf; -} - -static inline void freedqbuf(dqbuf_t buf) -{ - kfree(buf); -} - -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) -{ - struct super_block *sb = info->dqi_sb; - - memset(buf, 0, info->dqi_usable_bs); - return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); -} - -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) -{ - struct super_block *sb = info->dqi_sb; - - return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); -} - -/* Remove empty block from list and return it */ -static int get_free_dqblk(struct qtree_mem_dqinfo *info) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int ret, blk; - - if (!buf) - return -ENOMEM; - if (info->dqi_free_blk) { - blk = info->dqi_free_blk; - ret = read_blk(info, blk, buf); - if (ret < 0) - goto out_buf; - info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); - } - else { - memset(buf, 0, info->dqi_usable_bs); - /* Assure block allocation... */ - ret = write_blk(info, info->dqi_blocks, buf); - if (ret < 0) - goto out_buf; - blk = info->dqi_blocks++; - } - mark_info_dirty(info->dqi_sb, info->dqi_type); - ret = blk; -out_buf: - freedqbuf(buf); - return ret; -} - -/* Insert empty block to the list */ -static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int err; - - dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); - dh->dqdh_prev_free = cpu_to_le32(0); - dh->dqdh_entries = cpu_to_le16(0); - err = write_blk(info, blk, buf); - if (err < 0) - return err; - info->dqi_free_blk = blk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - return 0; -} - -/* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - uint nextblk = le32_to_cpu(dh->dqdh_next_free); - uint prevblk = le32_to_cpu(dh->dqdh_prev_free); - int err; - - if (!tmpbuf) - return -ENOMEM; - if (nextblk) { - err = read_blk(info, nextblk, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = - dh->dqdh_prev_free; - err = write_blk(info, nextblk, tmpbuf); - if (err < 0) - goto out_buf; - } - if (prevblk) { - err = read_blk(info, prevblk, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = - dh->dqdh_next_free; - err = write_blk(info, prevblk, tmpbuf); - if (err < 0) - goto out_buf; - } else { - info->dqi_free_entry = nextblk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - } - freedqbuf(tmpbuf); - dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); - /* No matter whether write succeeds block is out of list */ - if (write_blk(info, blk, buf) < 0) - printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Insert 
given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int err; - - if (!tmpbuf) - return -ENOMEM; - dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); - dh->dqdh_prev_free = cpu_to_le32(0); - err = write_blk(info, blk, buf); - if (err < 0) - goto out_buf; - if (info->dqi_free_entry) { - err = read_blk(info, info->dqi_free_entry, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = - cpu_to_le32(blk); - err = write_blk(info, info->dqi_free_entry, tmpbuf); - if (err < 0) - goto out_buf; - } - freedqbuf(tmpbuf); - info->dqi_free_entry = blk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Is the entry in the block free? */ -int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) -{ - int i; - - for (i = 0; i < info->dqi_entry_size; i++) - if (disk[i]) - return 0; - return 1; -} -EXPORT_SYMBOL(qtree_entry_unused); - -/* Find space for dquot */ -static uint find_free_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, int *err) -{ - uint blk, i; - struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - char *ddquot; - - *err = 0; - if (!buf) { - *err = -ENOMEM; - return 0; - } - dh = (struct qt_disk_dqdbheader *)buf; - if (info->dqi_free_entry) { - blk = info->dqi_free_entry; - *err = read_blk(info, blk, buf); - if (*err < 0) - goto out_buf; - } else { - blk = get_free_dqblk(info); - if ((int)blk < 0) { - *err = blk; - freedqbuf(buf); - return 0; - } - memset(buf, 0, info->dqi_usable_bs); - /* This is enough as block is already zeroed and entry list is empty... */ - info->dqi_free_entry = blk; - mark_info_dirty(dquot->dq_sb, dquot->dq_type); - } - /* Block will be full? 
*/ - if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { - *err = remove_free_dqentry(info, buf, blk); - if (*err < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't " - "remove block (%u) from entry free list.\n", - blk); - goto out_buf; - } - } - le16_add_cpu(&dh->dqdh_entries, 1); - /* Find free structure in block */ - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size); -#ifdef __QUOTA_QT_PARANOIA - if (i == qtree_dqstr_in_blk(info)) { - printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " - "but it shouldn't.\n"); - *err = -EIO; - goto out_buf; - } -#endif - *err = write_blk(info, blk, buf); - if (*err < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " - "data block %u.\n", blk); - goto out_buf; - } - dquot->dq_off = (blk << info->dqi_blocksize_bits) + - sizeof(struct qt_disk_dqdbheader) + - i * info->dqi_entry_size; - freedqbuf(buf); - return blk; -out_buf: - freedqbuf(buf); - return 0; -} - -/* Insert reference to structure into the trie */ -static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *treeblk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0, newson = 0, newact = 0; - __le32 *ref; - uint newblk; - - if (!buf) - return -ENOMEM; - if (!*treeblk) { - ret = get_free_dqblk(info); - if (ret < 0) - goto out_buf; - *treeblk = ret; - memset(buf, 0, info->dqi_usable_bs); - newact = 1; - } else { - ret = read_blk(info, *treeblk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read tree quota block " - "%u.\n", *treeblk); - goto out_buf; - } - } - ref = (__le32 *)buf; - newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (!newblk) - newson = 1; - if (depth == info->dqi_qtree_depth - 1) { -#ifdef __QUOTA_QT_PARANOIA - if (newblk) { - printk(KERN_ERR "VFS: Inserting already present quota " - "entry (block %u).\n", - le32_to_cpu(ref[get_index(info, - dquot->dq_id, depth)])); - ret = -EIO; - goto out_buf; - } -#endif - newblk = find_free_dqentry(info, dquot, &ret); - } else { - ret = do_insert_tree(info, dquot, &newblk, depth+1); - } - if (newson && ret >= 0) { - ref[get_index(info, dquot->dq_id, depth)] = - cpu_to_le32(newblk); - ret = write_blk(info, *treeblk, buf); - } else if (newact && ret < 0) { - put_free_dqblk(info, buf, *treeblk); - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Wrapper for inserting quota structure into tree */ -static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, - struct dquot *dquot) -{ - int tmp = QT_TREEOFF; - return do_insert_tree(info, dquot, &tmp, 0); -} - -/* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... 
- */ -int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - int type = dquot->dq_type; - struct super_block *sb = dquot->dq_sb; - ssize_t ret; - dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); - - if (!ddquot) - return -ENOMEM; - - /* dq_off is guarded by dqio_mutex */ - if (!dquot->dq_off) { - ret = dq_insert_tree(info, dquot); - if (ret < 0) { - printk(KERN_ERR "VFS: Error %zd occurred while " - "creating quota.\n", ret); - freedqbuf(ddquot); - return ret; - } - } - spin_lock(&dq_data_lock); - info->dqi_ops->mem2disk_dqblk(ddquot, dquot); - spin_unlock(&dq_data_lock); - ret = sb->s_op->quota_write(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); - if (ret != info->dqi_entry_size) { - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - sb->s_id); - if (ret >= 0) - ret = -ENOSPC; - } else { - ret = 0; - } - dqstats.writes++; - freedqbuf(ddquot); - - return ret; -} -EXPORT_SYMBOL(qtree_write_dquot); - -/* Free dquot entry in data block */ -static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint blk) -{ - struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0; - - if (!buf) - return -ENOMEM; - if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { - printk(KERN_ERR "VFS: Quota structure has offset to other " - "block (%u) than it should (%u).\n", blk, - (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); - goto out_buf; - } - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); - goto out_buf; - } - dh = (struct qt_disk_dqdbheader *)buf; - le16_add_cpu(&dh->dqdh_entries, -1); - if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ - ret = remove_free_dqentry(info, buf, blk); - if (ret >= 0) - ret = put_free_dqblk(info, buf, blk); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't move quota data block (%u) " - "to free list.\n", blk); - goto out_buf; - } - } else { - memset(buf + - (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), - 0, info->dqi_entry_size); - if (le16_to_cpu(dh->dqdh_entries) == - qtree_dqstr_in_blk(info) - 1) { - /* Insert will write block itself */ - ret = insert_free_dqentry(info, buf, blk); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't insert quota data " - "block (%u) to free entry list.\n", blk); - goto out_buf; - } - } else { - ret = write_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't write quota data " - "block %u\n", blk); - goto out_buf; - } - } - } - dquot->dq_off = 0; /* Quota is now unattached */ -out_buf: - freedqbuf(buf); - return ret; -} - -/* Remove reference to dquot from tree */ -static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *blk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0; - uint newblk; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, *blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); - goto out_buf; - } - newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (depth == info->dqi_qtree_depth - 1) { - ret = free_dqentry(info, dquot, newblk); - newblk = 0; - } else { - ret = remove_tree(info, dquot, &newblk, depth+1); - } - if (ret >= 0 && !newblk) { - int i; - ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); - /* Block got empty? 
*/ - for (i = 0; - i < (info->dqi_usable_bs >> 2) && !ref[i]; - i++); - /* Don't put the root block into the free block list */ - if (i == (info->dqi_usable_bs >> 2) - && *blk != QT_TREEOFF) { - put_free_dqblk(info, buf, *blk); - *blk = 0; - } else { - ret = write_blk(info, *blk, buf); - if (ret < 0) - printk(KERN_ERR "VFS: Can't write quota tree " - "block %u.\n", *blk); - } - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Delete dquot from tree */ -int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - uint tmp = QT_TREEOFF; - - if (!dquot->dq_off) /* Even not allocated? */ - return 0; - return remove_tree(info, dquot, &tmp, 0); -} -EXPORT_SYMBOL(qtree_delete_dquot); - -/* Find entry in block */ -static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, uint blk) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - loff_t ret = 0; - int i; - char *ddquot; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size); - if (i == qtree_dqstr_in_blk(info)) { - printk(KERN_ERR "VFS: Quota for id %u referenced " - "but not present.\n", dquot->dq_id); - ret = -EIO; - goto out_buf; - } else { - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct - qt_disk_dqdbheader) + i * info->dqi_entry_size; - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree */ -static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, uint blk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - loff_t ret = 0; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - ret = 0; - blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (!blk) /* No reference? */ - goto out_buf; - if (depth < info->dqi_qtree_depth - 1) - ret = find_tree_dqentry(info, dquot, blk, depth+1); - else - ret = find_block_dqentry(info, dquot, blk); -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree - wrapper function */ -static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot) -{ - return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); -} - -int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - int type = dquot->dq_type; - struct super_block *sb = dquot->dq_sb; - loff_t offset; - dqbuf_t ddquot; - int ret = 0; - -#ifdef __QUOTA_QT_PARANOIA - /* Invalidated quota? */ - if (!sb_dqopt(dquot->dq_sb)->files[type]) { - printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); - return -EIO; - } -#endif - /* Do we know offset of the dquot entry in the quota file? */ - if (!dquot->dq_off) { - offset = find_dqentry(info, dquot); - if (offset <= 0) { /* Entry not present? 
*/ - if (offset < 0) - printk(KERN_ERR "VFS: Can't read quota " - "structure for id %u.\n", dquot->dq_id); - dquot->dq_off = 0; - set_bit(DQ_FAKE_B, &dquot->dq_flags); - memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - ret = offset; - goto out; - } - dquot->dq_off = offset; - } - ddquot = getdqbuf(info->dqi_entry_size); - if (!ddquot) - return -ENOMEM; - ret = sb->s_op->quota_read(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); - if (ret != info->dqi_entry_size) { - if (ret >= 0) - ret = -EIO; - printk(KERN_ERR "VFS: Error while reading quota " - "structure for id %u.\n", dquot->dq_id); - set_bit(DQ_FAKE_B, &dquot->dq_flags); - memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - freedqbuf(ddquot); - goto out; - } - spin_lock(&dq_data_lock); - info->dqi_ops->disk2mem_dqblk(dquot, ddquot); - if (!dquot->dq_dqb.dqb_bhardlimit && - !dquot->dq_dqb.dqb_bsoftlimit && - !dquot->dq_dqb.dqb_ihardlimit && - !dquot->dq_dqb.dqb_isoftlimit) - set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); - freedqbuf(ddquot); -out: - dqstats.reads++; - return ret; -} -EXPORT_SYMBOL(qtree_read_dquot); - -/* Check whether dquot should not be deleted. We know we are - * the only one operating on dquot (thanks to dq_lock) */ -int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) - return qtree_delete_dquot(info, dquot); - return 0; -} -EXPORT_SYMBOL(qtree_release_dquot); diff --git a/fs/quota_tree.h b/fs/quota_tree.h deleted file mode 100644 index a1ab8db81a5..00000000000 --- a/fs/quota_tree.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTA_TREE_H -#define _LINUX_QUOTA_TREE_H - -#include -#include - -/* - * Structure of header of block with quota structures. 
It is padded to 16 bytes so - * there will be space for exactly 21 quota-entries in a block - */ -struct qt_disk_dqdbheader { - __le32 dqdh_next_free; /* Number of next block with free entry */ - __le32 dqdh_prev_free; /* Number of previous block with free entry */ - __le16 dqdh_entries; /* Number of valid entries in block */ - __le16 dqdh_pad1; - __le32 dqdh_pad2; -}; - -#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ - -#endif /* _LINUX_QUOTAIO_TREE_H */ diff --git a/fs/quota_v1.c b/fs/quota_v1.c deleted file mode 100644 index b4af1c69ad1..00000000000 --- a/fs/quota_v1.c +++ /dev/null @@ -1,218 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quotaio_v1.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Old quota format support"); -MODULE_LICENSE("GPL"); - -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -static inline qsize_t v1_stoqb(qsize_t space) -{ - return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; -} - -static inline qsize_t v1_qbtos(qsize_t blocks) -{ - return blocks << QUOTABLOCK_BITS; -} - -static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) -{ - m->dqb_ihardlimit = d->dqb_ihardlimit; - m->dqb_isoftlimit = d->dqb_isoftlimit; - m->dqb_curinodes = d->dqb_curinodes; - m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); - m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); - m->dqb_curspace = v1_qbtos(d->dqb_curblocks); - m->dqb_itime = d->dqb_itime; - m->dqb_btime = d->dqb_btime; -} - -static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) -{ - d->dqb_ihardlimit = m->dqb_ihardlimit; - d->dqb_isoftlimit = m->dqb_isoftlimit; - d->dqb_curinodes = m->dqb_curinodes; - d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); - d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); - d->dqb_curblocks = v1_stoqb(m->dqb_curspace); - d->dqb_itime = m->dqb_itime; - d->dqb_btime = m->dqb_btime; -} - -static int v1_read_dqblk(struct dquot *dquot) -{ - int type = dquot->dq_type; - struct v1_disk_dqblk dqblk; - - if (!sb_dqopt(dquot->dq_sb)->files[type]) - return -EINVAL; - - /* Set structure to 0s in case read fails/is after end of file */ - memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); - dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); - - v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); - if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && - dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) - set_bit(DQ_FAKE_B, &dquot->dq_flags); - dqstats.reads++; - - return 0; -} - -static int v1_commit_dqblk(struct dquot *dquot) -{ - short type = dquot->dq_type; - ssize_t ret; - struct v1_disk_dqblk dqblk; - - v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); - if (dquot->dq_id == 0) { - dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; - dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; - } - ret = 0; - if (sb_dqopt(dquot->dq_sb)->files[type]) - ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); - if (ret != sizeof(struct v1_disk_dqblk)) { - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - dquot->dq_sb->s_id); - if (ret >= 0) - ret = -EIO; - goto out; - } - ret = 0; - -out: - dqstats.writes++; - - return ret; -} - -/* Magics of new quota format */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - 
-/* Header of new quota format */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -static int v1_check_quota_file(struct super_block *sb, int type) -{ - struct inode *inode = sb_dqopt(sb)->files[type]; - ulong blocks; - size_t off; - struct v2_disk_dqheader dqhead; - ssize_t size; - loff_t isize; - static const uint quota_magics[] = V2_INITQMAGICS; - - isize = i_size_read(inode); - if (!isize) - return 0; - blocks = isize >> BLOCK_SIZE_BITS; - off = isize & (BLOCK_SIZE - 1); - if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) - return 0; - /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); - if (size != sizeof(struct v2_disk_dqheader)) - return 1; /* Probably not new format */ - if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) - return 1; /* Definitely not new format */ - printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); - return 0; /* Seems like a new format file -> refuse it */ -} - -static int v1_read_file_info(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct v1_disk_dqblk dqblk; - int ret; - - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { - if (ret >= 0) - ret = -EIO; - goto out; - } - ret = 0; - /* limits are stored as unsigned 32-bit data */ - dqopt->info[type].dqi_maxblimit = 0xffffffff; - dqopt->info[type].dqi_maxilimit = 0xffffffff; - dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; - dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; -out: - return ret; -} - -static int v1_write_file_info(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct v1_disk_dqblk dqblk; - int ret; - - dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { - if (ret >= 0) - ret = -EIO; - goto out; - } - dqblk.dqb_itime = dqopt->info[type].dqi_igrace; - dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; - ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0)); - if (ret == sizeof(struct v1_disk_dqblk)) - ret = 0; - else if (ret > 0) - ret = -EIO; -out: - return ret; -} - -static struct quota_format_ops v1_format_ops = { - .check_quota_file = v1_check_quota_file, - .read_file_info = v1_read_file_info, - .write_file_info = v1_write_file_info, - .free_file_info = NULL, - .read_dqblk = v1_read_dqblk, - .commit_dqblk = v1_commit_dqblk, -}; - -static struct quota_format_type v1_quota_format = { - .qf_fmt_id = QFMT_VFS_OLD, - .qf_ops = &v1_format_ops, - .qf_owner = THIS_MODULE -}; - -static int __init init_v1_quota_format(void) -{ - return register_quota_format(&v1_quota_format); -} - -static void __exit exit_v1_quota_format(void) -{ - unregister_quota_format(&v1_quota_format); -} - -module_init(init_v1_quota_format); -module_exit(exit_v1_quota_format); - diff --git a/fs/quota_v2.c b/fs/quota_v2.c deleted file mode 100644 index b618b563635..00000000000 --- a/fs/quota_v2.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * vfsv0 quota IO operations on file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quota_tree.h" -#include "quotaio_v2.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Quota format v2 support"); -MODULE_LICENSE("GPL"); - -#define __QUOTA_V2_PARANOIA - -static void v2_mem2diskdqb(void *dp, struct dquot *dquot); -static void v2_disk2memdqb(struct dquot *dquot, void *dp); -static int v2_is_id(void *dp, struct dquot *dquot); - -static struct qtree_fmt_operations v2_qtree_ops = { - .mem2disk_dqblk = v2_mem2diskdqb, - .disk2mem_dqblk = v2_disk2memdqb, - .is_id = v2_is_id, -}; - -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -static inline qsize_t v2_stoqb(qsize_t space) -{ - return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; -} - -static inline qsize_t v2_qbtos(qsize_t blocks) -{ - return blocks << QUOTABLOCK_BITS; -} - -/* Check whether given file is really vfsv0 quotafile */ -static int v2_check_quota_file(struct super_block *sb, int type) -{ - struct v2_disk_dqheader dqhead; - ssize_t size; - static const uint quota_magics[] = V2_INITQMAGICS; - static const uint quota_versions[] = V2_INITQVERSIONS; - - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); - if (size != sizeof(struct v2_disk_dqheader)) { - printk("quota_v2: failed read expected=%zd got=%zd\n", - sizeof(struct v2_disk_dqheader), size); - return 0; - } - if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || - le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) - return 0; - return 1; -} - -/* Read information header from quota file */ -static int v2_read_file_info(struct super_block *sb, int type) -{ - struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct qtree_mem_dqinfo *qinfo; - ssize_t size; - - size = sb->s_op->quota_read(sb, type, (char *)&dinfo, - 
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); - if (size != sizeof(struct v2_disk_dqinfo)) { - printk(KERN_WARNING "Can't read info structure on device %s.\n", - sb->s_id); - return -1; - } - info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); - if (!info->dqi_priv) { - printk(KERN_WARNING - "Not enough memory for quota information structure.\n"); - return -1; - } - qinfo = info->dqi_priv; - /* limits are stored as unsigned 32-bit data */ - info->dqi_maxblimit = 0xffffffff; - info->dqi_maxilimit = 0xffffffff; - info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); - info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); - info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); - qinfo->dqi_sb = sb; - qinfo->dqi_type = type; - qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); - qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); - qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); - qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; - qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; - qinfo->dqi_qtree_depth = qtree_depth(qinfo); - qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); - qinfo->dqi_ops = &v2_qtree_ops; - return 0; -} - -/* Write information header to quota file */ -static int v2_write_file_info(struct super_block *sb, int type) -{ - struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct qtree_mem_dqinfo *qinfo = info->dqi_priv; - ssize_t size; - - spin_lock(&dq_data_lock); - info->dqi_flags &= ~DQF_INFO_DIRTY; - dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); - dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); - dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); - spin_unlock(&dq_data_lock); - dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); - dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); - dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); - size = sb->s_op->quota_write(sb, type, (char *)&dinfo, - sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); - if (size != sizeof(struct v2_disk_dqinfo)) { - printk(KERN_WARNING "Can't write info structure on device %s.\n", - sb->s_id); - return -1; - } - return 0; -} - -static void v2_disk2memdqb(struct dquot *dquot, void *dp) -{ - struct v2_disk_dqblk *d = dp, empty; - struct mem_dqblk *m = &dquot->dq_dqb; - - m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); - m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); - m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); - m->dqb_itime = le64_to_cpu(d->dqb_itime); - m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); - m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); - m->dqb_curspace = le64_to_cpu(d->dqb_curspace); - m->dqb_btime = le64_to_cpu(d->dqb_btime); - /* We need to escape back all-zero structure */ - memset(&empty, 0, sizeof(struct v2_disk_dqblk)); - empty.dqb_itime = cpu_to_le64(1); - if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) - m->dqb_itime = 0; -} - -static void v2_mem2diskdqb(void *dp, struct dquot *dquot) -{ - struct v2_disk_dqblk *d = dp; - struct mem_dqblk *m = &dquot->dq_dqb; - struct qtree_mem_dqinfo *info = - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; - - d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); - d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); - d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); - d->dqb_itime = cpu_to_le64(m->dqb_itime); - d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); - d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); - d->dqb_curspace = cpu_to_le64(m->dqb_curspace); - d->dqb_btime = 
cpu_to_le64(m->dqb_btime); - d->dqb_id = cpu_to_le32(dquot->dq_id); - if (qtree_entry_unused(info, dp)) - d->dqb_itime = cpu_to_le64(1); -} - -static int v2_is_id(void *dp, struct dquot *dquot) -{ - struct v2_disk_dqblk *d = dp; - struct qtree_mem_dqinfo *info = - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; - - if (qtree_entry_unused(info, dp)) - return 0; - return le32_to_cpu(d->dqb_id) == dquot->dq_id; -} - -static int v2_read_dquot(struct dquot *dquot) -{ - return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_write_dquot(struct dquot *dquot) -{ - return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_release_dquot(struct dquot *dquot) -{ - return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_free_file_info(struct super_block *sb, int type) -{ - kfree(sb_dqinfo(sb, type)->dqi_priv); - return 0; -} - -static struct quota_format_ops v2_format_ops = { - .check_quota_file = v2_check_quota_file, - .read_file_info = v2_read_file_info, - .write_file_info = v2_write_file_info, - .free_file_info = v2_free_file_info, - .read_dqblk = v2_read_dquot, - .commit_dqblk = v2_write_dquot, - .release_dqblk = v2_release_dquot, -}; - -static struct quota_format_type v2_quota_format = { - .qf_fmt_id = QFMT_VFS_V0, - .qf_ops = &v2_format_ops, - .qf_owner = THIS_MODULE -}; - -static int __init init_v2_quota_format(void) -{ - return register_quota_format(&v2_quota_format); -} - -static void __exit exit_v2_quota_format(void) -{ - unregister_quota_format(&v2_quota_format); -} - -module_init(init_v2_quota_format); -module_exit(exit_v2_quota_format); diff --git a/fs/quotaio_v1.h b/fs/quotaio_v1.h deleted file mode 100644 index 746654b5de7..00000000000 --- a/fs/quotaio_v1.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _LINUX_QUOTAIO_V1_H -#define _LINUX_QUOTAIO_V1_H - -#include - -/* - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. - */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. 
- */ -struct v1_disk_dqblk { - __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ -}; - -#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) - -#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/fs/quotaio_v2.h b/fs/quotaio_v2.h deleted file mode 100644 index 530fe580685..00000000000 --- a/fs/quotaio_v2.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTAIO_V2_H -#define _LINUX_QUOTAIO_V2_H - -#include -#include - -/* - * Definitions of magics and versions of current quota files - */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - -#define V2_INITQVERSIONS {\ - 0, /* USRQUOTA */\ - 0 /* GRPQUOTA */\ -} - -/* First generic header */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is a radix tree whose leaves point - * to blocks of these structures. - */ -struct v2_disk_dqblk { - __le32 dqb_id; /* id this quota applies to */ - __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __le32 dqb_isoftlimit; /* preferred inode limit */ - __le32 dqb_curinodes; /* current # allocated inodes */ - __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ - __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ - __le64 dqb_curspace; /* current space occupied (in bytes) */ - __le64 dqb_btime; /* time limit for excessive disk use */ - __le64 dqb_itime; /* time limit for excessive inode use */ -}; - -/* Header with type and version specific information */ -struct v2_disk_dqinfo { - __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ - __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ - __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ - __le32 dqi_blocks; /* Number of blocks in file */ - __le32 dqi_free_blk; /* Number of first free block in the list */ - __le32 dqi_free_entry; /* Number of block with at least one free entry */ -}; - -#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ -#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ - -#endif /* _LINUX_QUOTAIO_V2_H */ -- cgit v1.2.3-70-g09d2 From c516610cfec5c50f84ff8cc315628548481f4990 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 15:32:46 +0100 Subject: quota: Make global quota locks cacheline aligned Andrew Morton has suggested that the three global quota locks can end up in the same cacheline, which can result in bad cacheline ping-pong on SMP machines. Make the locks cacheline aligned so that we avoid this problem (thanks go to Andrew for the idea).
Signed-off-by: Jan Kara CC: Andrew Morton --- fs/quota/dquot.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 28aa1466760..e840fa2b112 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -129,9 +129,9 @@ * i_mutex on quota files is special (it's below dqio_mutex) */ -static DEFINE_SPINLOCK(dq_list_lock); -static DEFINE_SPINLOCK(dq_state_lock); -DEFINE_SPINLOCK(dq_data_lock); +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); +__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); static char *quotatypes[] = INITQFNAMES; -- cgit v1.2.3-70-g09d2 From dd6f3c6d5a26a282521f15a183fdc2d6f35cfa0f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:01:43 +0100 Subject: quota: Remove NODQUOT macro Remove this macro which is just a definition of NULL. Fix a few coding style issues along the way. Signed-off-by: Jan Kara --- fs/quota/dquot.c | 70 ++++++++++++++++++++++++++------------------------- include/linux/quota.h | 2 -- 2 files changed, 36 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index e840fa2b112..4881db32e56 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -253,7 +253,7 @@ static inline struct dquot *find_dquot(unsigned int hashent, struct super_block if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) return dquot; } - return NODQUOT; + return NULL; } /* Add a dquot to the tail of the free list */ @@ -696,7 +696,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) dquot = sb->dq_op->alloc_dquot(sb, type); if(!dquot) - return NODQUOT; + return NULL; mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); @@ -722,10 +722,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { unsigned int hashent = hashfn(sb, id, type); - struct dquot *dquot = NODQUOT, *empty = NODQUOT; + struct dquot *dquot = NULL, *empty = NULL; if (!sb_has_quota_active(sb, type)) - return NODQUOT; + return NULL; we_slept: spin_lock(&dq_list_lock); spin_lock(&dq_state_lock); @@ -736,15 +736,17 @@ we_slept: } spin_unlock(&dq_state_lock); - if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { - if (empty == NODQUOT) { + dquot = find_dquot(hashent, sb, id, type); + if (!dquot) { + if (!empty) { spin_unlock(&dq_list_lock); - if ((empty = get_empty_dquot(sb, type)) == NODQUOT) + empty = get_empty_dquot(sb, type); + if (!empty) schedule(); /* Try to wait for a moment... 
*/ goto we_slept; } dquot = empty; - empty = NODQUOT; + empty = NULL; dquot->dq_id = id; /* all dquots go on the inuse_list */ put_inuse(dquot); @@ -766,7 +768,7 @@ we_slept: /* Read the dquot and instantiate it (everything done only if needed) */ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { dqput(dquot); - dquot = NODQUOT; + dquot = NULL; goto out; } #ifdef __DQUOT_PARANOIA @@ -787,9 +789,9 @@ static int dqinit_needed(struct inode *inode, int type) if (IS_NOQUOTA(inode)) return 0; if (type != -1) - return inode->i_dquot[type] == NODQUOT; + return !inode->i_dquot[type]; for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) return 1; return 0; } @@ -840,8 +842,8 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, { struct dquot *dquot = inode->i_dquot[type]; - inode->i_dquot[type] = NODQUOT; - if (dquot != NODQUOT) { + inode->i_dquot[type] = NULL; + if (dquot) { if (dqput_blocks(dquot)) { #ifdef __DQUOT_PARANOIA if (atomic_read(&dquot->dq_count) != 1) @@ -1112,7 +1114,7 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) int i; for (i = 0; i < MAXQUOTAS; i++) - if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && + if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && !warning_issued(dquots[i], warntype[i])) { #ifdef CONFIG_PRINT_QUOTA_WARNING print_warning(dquots[i], warntype[i]); @@ -1249,7 +1251,7 @@ int dquot_initialize(struct inode *inode, int type) { unsigned int id = 0; int cnt, ret = 0; - struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; + struct dquot *got[MAXQUOTAS] = { NULL, NULL }; struct super_block *sb = inode->i_sb; /* First test before acquiring mutex - solves deadlocks when we @@ -1282,9 +1284,9 @@ int dquot_initialize(struct inode *inode, int type) /* Avoid races with quotaoff() */ if (!sb_has_quota_active(sb, cnt)) continue; - if (inode->i_dquot[cnt] == NODQUOT) { + if (!inode->i_dquot[cnt]) { inode->i_dquot[cnt] = got[cnt]; - got[cnt] = NODQUOT; + got[cnt] = NULL; } } out_err: @@ -1307,7 +1309,7 @@ int dquot_drop(struct inode *inode) down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { put[cnt] = inode->i_dquot[cnt]; - inode->i_dquot[cnt] = NODQUOT; + inode->i_dquot[cnt] = NULL; } up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1332,7 +1334,7 @@ void vfs_dq_drop(struct inode *inode) * must assure that nobody can come after the DQUOT_DROP and * add quota pointers back anyway */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) break; if (cnt < MAXQUOTAS) inode->i_sb->dq_op->drop(inode); @@ -1363,7 +1365,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) { @@ -1372,7 +1374,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, } } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (reserve) dquot_resv_space(inode->i_dquot[cnt], number); @@ -1461,14 +1463,14 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) 
goto warn_put_all; } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; dquot_incr_inodes(inode->i_dquot[cnt], number); } @@ -1506,7 +1508,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number) spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) dquot_claim_reserved_space(inode->i_dquot[cnt], number); } @@ -1540,7 +1542,7 @@ void dquot_release_reserved_space(struct inode *inode, qsize_t number) spin_lock(&dq_data_lock); /* Release reserved dquots */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) dquot_free_reserved_space(inode->i_dquot[cnt], number); } spin_unlock(&dq_data_lock); @@ -1576,7 +1578,7 @@ out_sub: } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); dquot_decr_space(inode->i_dquot[cnt], number); @@ -1614,7 +1616,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); dquot_decr_inodes(inode->i_dquot[cnt], number); @@ -1667,8 +1669,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) return QUOTA_OK; /* Initialize the arrays */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - transfer_from[cnt] = NODQUOT; - transfer_to[cnt] = NODQUOT; + transfer_from[cnt] = NULL; + transfer_to[cnt] = NULL; warntype_to[cnt] = QUOTA_NL_NOWARN; switch (cnt) { case USRQUOTA: @@ -1696,7 +1698,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) space = cur_space + rsv_space; /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_to[cnt] == NODQUOT) + if (!transfer_to[cnt]) continue; transfer_from[cnt] = inode->i_dquot[cnt]; if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == @@ -1712,7 +1714,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) /* * Skip changes for same uid or gid or for turned off quota-type. 
*/ - if (transfer_to[cnt] == NODQUOT) + if (!transfer_to[cnt]) continue; /* Due to IO error we might not have transfer_from[] structure */ @@ -1743,7 +1745,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) if (transfer_to[cnt]) { mark_dquot_dirty(transfer_to[cnt]); /* The reference we got is transferred to the inode */ - transfer_to[cnt] = NODQUOT; + transfer_to[cnt] = NULL; } } warn_put_all: @@ -1761,7 +1763,7 @@ over_quota: up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); /* Clear dquot pointers we don't want to dqput() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) - transfer_from[cnt] = NODQUOT; + transfer_from[cnt] = NULL; ret = NO_QUOTA; goto warn_put_all; } @@ -2256,7 +2258,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d struct dquot *dquot; dquot = dqget(sb, id, type); - if (dquot == NODQUOT) + if (!dquot) return -ESRCH; do_get_dqblk(dquot, di); dqput(dquot); diff --git a/include/linux/quota.h b/include/linux/quota.h index a510d91561f..78c48895b12 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -277,8 +277,6 @@ struct dquot { struct mem_dqblk dq_dqb; /* Diskquota usage */ }; -#define NODQUOT (struct dquot *)NULL - #define QUOTA_OK 0 #define NO_QUOTA 1 -- cgit v1.2.3-70-g09d2 From d26ac1a8128f6e1fc467f145acaa9f9bf9e16b75 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:17:50 +0100 Subject: quota: Remove dqbuf_t and other cleanups Remove bogus typedef which is just a definition of char *. Remove unnecessary type casts. Substitute freedqbuf() with kfree. Signed-off-by: Jan Kara --- fs/quota/quota_tree.c | 104 ++++++++++++++++++++++++-------------------------- 1 file changed, 49 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 953404c95b1..3a5a90c0896 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -22,8 +22,6 @@ MODULE_LICENSE("GPL"); #define __QUOTA_QT_PARANOIA -typedef char *dqbuf_t; - static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) { unsigned int epb = info->dqi_usable_bs >> 2; @@ -41,40 +39,35 @@ static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) / info->dqi_entry_size; } -static dqbuf_t getdqbuf(size_t size) +static char *getdqbuf(size_t size) { - dqbuf_t buf = kmalloc(size, GFP_NOFS); + char *buf = kmalloc(size, GFP_NOFS); if (!buf) printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); return buf; } -static inline void freedqbuf(dqbuf_t buf) -{ - kfree(buf); -} - -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; memset(buf, 0, info->dqi_usable_bs); - return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, + return sb->s_op->quota_read(sb, info->dqi_type, buf, info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; - return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, + return sb->s_op->quota_write(sb, info->dqi_type, buf, info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); struct 
qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int ret, blk; @@ -98,12 +91,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info) mark_info_dirty(info->dqi_sb, info->dqi_type); ret = blk; out_buf: - freedqbuf(buf); + kfree(buf); return ret; } /* Insert empty block to the list */ -static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) { struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int err; @@ -120,9 +113,9 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) } /* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; uint nextblk = le32_to_cpu(dh->dqdh_next_free); uint prevblk = le32_to_cpu(dh->dqdh_prev_free); @@ -153,21 +146,21 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint info->dqi_free_entry = nextblk; mark_info_dirty(info->dqi_sb, info->dqi_type); } - freedqbuf(tmpbuf); + kfree(tmpbuf); dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); /* No matter whether write succeeds block is out of list */ if (write_blk(info, blk, buf) < 0) printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); return 0; out_buf: - freedqbuf(tmpbuf); + kfree(tmpbuf); return err; } /* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int err; @@ -188,12 +181,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint if (err < 0) goto out_buf; } - freedqbuf(tmpbuf); + kfree(tmpbuf); info->dqi_free_entry = blk; mark_info_dirty(info->dqi_sb, info->dqi_type); return 0; out_buf: - freedqbuf(tmpbuf); + kfree(tmpbuf); return err; } @@ -215,7 +208,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, { uint blk, i; struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); char *ddquot; *err = 0; @@ -233,7 +226,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk = get_free_dqblk(info); if ((int)blk < 0) { *err = blk; - freedqbuf(buf); + kfree(buf); return 0; } memset(buf, 0, info->dqi_usable_bs); @@ -253,9 +246,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, } le16_add_cpu(&dh->dqdh_entries, 1); /* Find free structure in block */ - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size); + i++, ddquot += info->dqi_entry_size) + ; #ifdef __QUOTA_QT_PARANOIA if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " @@ -273,10 +267,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, dquot->dq_off = (blk << info->dqi_blocksize_bits) + 
sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; - freedqbuf(buf); + kfree(buf); return blk; out_buf: - freedqbuf(buf); + kfree(buf); return 0; } @@ -284,7 +278,7 @@ out_buf: static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *treeblk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0, newson = 0, newact = 0; __le32 *ref; uint newblk; @@ -333,7 +327,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, put_free_dqblk(info, buf, *treeblk); } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -353,7 +347,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_type; struct super_block *sb = dquot->dq_sb; ssize_t ret; - dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); + char *ddquot = getdqbuf(info->dqi_entry_size); if (!ddquot) return -ENOMEM; @@ -364,15 +358,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) if (ret < 0) { printk(KERN_ERR "VFS: Error %zd occurred while " "creating quota.\n", ret); - freedqbuf(ddquot); + kfree(ddquot); return ret; } } spin_lock(&dq_data_lock); info->dqi_ops->mem2disk_dqblk(ddquot, dquot); spin_unlock(&dq_data_lock); - ret = sb->s_op->quota_write(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); + ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, + dquot->dq_off); if (ret != info->dqi_entry_size) { printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", sb->s_id); @@ -382,7 +376,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ret = 0; } dqstats.writes++; - freedqbuf(ddquot); + kfree(ddquot); return ret; } @@ -393,7 +387,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; if (!buf) @@ -444,7 +438,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, } dquot->dq_off = 0; /* Quota is now unattached */ out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -452,7 +446,7 @@ out_buf: static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *blk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; uint newblk; __le32 *ref = (__le32 *)buf; @@ -475,9 +469,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, int i; ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); /* Block got empty? 
*/ - for (i = 0; - i < (info->dqi_usable_bs >> 2) && !ref[i]; - i++); + for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) + ; /* Don't put the root block into the free block list */ if (i == (info->dqi_usable_bs >> 2) && *blk != QT_TREEOFF) { @@ -491,7 +484,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, } } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -510,7 +503,7 @@ EXPORT_SYMBOL(qtree_delete_dquot); static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; int i; char *ddquot; @@ -522,9 +515,10 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); goto out_buf; } - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size); + i++, ddquot += info->dqi_entry_size) + ; if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: Quota for id %u referenced " "but not present.\n", dquot->dq_id); @@ -535,7 +529,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -543,7 +537,7 @@ out_buf: static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; __le32 *ref = (__le32 *)buf; @@ -563,7 +557,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, else ret = find_block_dqentry(info, dquot, blk); out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -579,7 +573,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_type; struct super_block *sb = dquot->dq_sb; loff_t offset; - dqbuf_t ddquot; + char *ddquot; int ret = 0; #ifdef __QUOTA_QT_PARANOIA @@ -607,8 +601,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ddquot = getdqbuf(info->dqi_entry_size); if (!ddquot) return -ENOMEM; - ret = sb->s_op->quota_read(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); + ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, + dquot->dq_off); if (ret != info->dqi_entry_size) { if (ret >= 0) ret = -EIO; @@ -616,7 +610,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) "structure for id %u.\n", dquot->dq_id); set_bit(DQ_FAKE_B, &dquot->dq_flags); memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - freedqbuf(ddquot); + kfree(ddquot); goto out; } spin_lock(&dq_data_lock); @@ -627,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) !dquot->dq_dqb.dqb_isoftlimit) set_bit(DQ_FAKE_B, &dquot->dq_flags); spin_unlock(&dq_data_lock); - freedqbuf(ddquot); + kfree(ddquot); out: dqstats.reads++; return ret; -- cgit v1.2.3-70-g09d2 From 9e3509e273ecc2a5f937c493f9bb71e5e41ac2e5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:45:12 +0100 Subject: vfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
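For illustration, a hedged sketch of the call-site change this rename produces (example_chown() is a hypothetical helper, not code from the series); the lowercase wrappers keep the old semantics, so a non-zero return from vfs_dq_transfer() still means the quota transfer failed:

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical setattr-style helper showing the old and new spelling. */
static int example_chown(struct inode *inode, struct iattr *attr)
{
	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
		/* was: error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; */
		return vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
	return 0;
}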
Signed-off-by: Jan Kara CC: Alexander Viro --- fs/attr.c | 3 ++- fs/inode.c | 4 ++-- fs/namei.c | 22 +++++++++++----------- fs/open.c | 2 +- fs/super.c | 8 ++++---- fs/sync.c | 2 +- 6 files changed, 21 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/attr.c b/fs/attr.c index f4360192a93..9fe1b1bd30a 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr) if (!error) { if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? + -EDQUOT : 0; if (!error) error = inode_setattr(inode, attr); } diff --git a/fs/inode.c b/fs/inode.c index 826fb0b9d1c..bb81bd515f8 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -284,7 +284,7 @@ void clear_inode(struct inode *inode) BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); inode_sync_wait(inode); - DQUOT_DROP(inode); + vfs_dq_drop(inode); if (inode->i_sb->s_op->clear_inode) inode->i_sb->s_op->clear_inode(inode); if (S_ISBLK(inode->i_mode) && inode->i_bdev) @@ -1158,7 +1158,7 @@ void generic_delete_inode(struct inode *inode) if (op->delete_inode) { void (*delete)(struct inode *) = op->delete_inode; if (!is_bad_inode(inode)) - DQUOT_INIT(inode); + vfs_dq_init(inode); /* Filesystems implementing their own * s_op->delete_inode are required to call * truncate_inode_pages and clear_inode() diff --git a/fs/namei.c b/fs/namei.c index bbc15c23755..8937f4e7817 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1470,7 +1470,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, error = security_inode_create(dir, dentry, mode); if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->create(dir, dentry, mode, nd); if (!error) fsnotify_create(dir, dentry); @@ -1544,7 +1544,7 @@ int may_open(struct path *path, int acc_mode, int flag) error = security_path_truncate(path, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { - DQUOT_INIT(inode); + vfs_dq_init(inode); error = do_truncate(dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, @@ -1555,7 +1555,7 @@ int may_open(struct path *path, int acc_mode, int flag) return error; } else if (flag & FMODE_WRITE) - DQUOT_INIT(inode); + vfs_dq_init(inode); return 0; } @@ -1938,7 +1938,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) fsnotify_create(dir, dentry); @@ -2037,7 +2037,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->mkdir(dir, dentry, mode); if (!error) fsnotify_mkdir(dir, dentry); @@ -2123,7 +2123,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (!dir->i_op->rmdir) return -EPERM; - DQUOT_INIT(dir); + vfs_dq_init(dir); mutex_lock(&dentry->d_inode->i_mutex); dentry_unhash(dentry); @@ -2210,7 +2210,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) if (!dir->i_op->unlink) return -EPERM; - DQUOT_INIT(dir); + vfs_dq_init(dir); mutex_lock(&dentry->d_inode->i_mutex); if (d_mountpoint(dentry)) @@ -2321,7 +2321,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); @@ -2405,7 +2405,7 @@ 
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de return error; mutex_lock(&inode->i_mutex); - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->link(old_dentry, dir, new_dentry); mutex_unlock(&inode->i_mutex); if (!error) @@ -2604,8 +2604,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (!old_dir->i_op->rename) return -EPERM; - DQUOT_INIT(old_dir); - DQUOT_INIT(new_dir); + vfs_dq_init(old_dir); + vfs_dq_init(new_dir); old_name = fsnotify_oldname_init(old_dentry->d_name.name); diff --git a/fs/open.c b/fs/open.c index a3a78ceb2a2..75b61677daa 100644 --- a/fs/open.c +++ b/fs/open.c @@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) if (!error) error = security_path_truncate(&path, length, 0); if (!error) { - DQUOT_INIT(inode); + vfs_dq_init(inode); error = do_truncate(path.dentry, length, 0, NULL); } diff --git a/fs/super.c b/fs/super.c index 6ce501447ad..0f9d17f2c75 100644 --- a/fs/super.c +++ b/fs/super.c @@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s) if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { s->s_count -= S_BIAS-1; spin_unlock(&sb_lock); - DQUOT_OFF(s, 0); + vfs_dq_off(s, 0); down_write(&s->s_umount); fs->kill_sb(s); put_filesystem(fs); @@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super); void __fsync_super(struct super_block *sb) { sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); + vfs_dq_sync(sb); lock_super(sb); if (sb->s_dirt && sb->s_op->write_super) sb->s_op->write_super(sb); @@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) mark_files_ro(sb); else if (!fs_may_remount_ro(sb)) return -EBUSY; - retval = DQUOT_OFF(sb, 1); + retval = vfs_dq_off(sb, 1); if (retval < 0 && retval != -ENOSYS) return -EBUSY; } @@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) } sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); if (remount_rw) - DQUOT_ON_REMOUNT(sb); + vfs_dq_quota_on_remount(sb); return 0; } diff --git a/fs/sync.c b/fs/sync.c index a16d53e5fe9..ef36bc921bf 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -25,7 +25,7 @@ static void do_sync(unsigned long wait) { wakeup_pdflush(0); sync_inodes(0); /* All mappings, inodes and their blockdevs */ - DQUOT_SYNC(NULL); + vfs_dq_sync(NULL); sync_supers(); /* Write the superblocks */ sync_filesystems(0); /* Start syncing the filesystems */ sync_filesystems(wait); /* Waitingly sync the filesystems */ -- cgit v1.2.3-70-g09d2 From 314649558d2ff0927d102fa06c62557a92ce5c1d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:46:21 +0100 Subject: ramfs: Remove quota call Ramfs has no bussiness in quotas. Signed-off-by: Jan Kara --- fs/ramfs/file-nommu.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 5d7c7ececa6..995ef1d6686 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) if (ret) return ret; - /* by providing our own setattr() method, we skip this quotaism */ - if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) || - (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid)) - ret = DQUOT_TRANSFER(inode, ia) ? 
-EDQUOT : 0; - /* pick out size-changing events */ if (ia->ia_valid & ATTR_SIZE) { loff_t size = i_size_read(inode); -- cgit v1.2.3-70-g09d2 From 6f90bee5062a8af24d8aa5c47182d15aa28a0f17 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:52:06 +0100 Subject: ext2: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: linux-ext4@vger.kernel.org --- fs/ext2/balloc.c | 8 ++++---- fs/ext2/ialloc.c | 10 +++++----- fs/ext2/inode.c | 2 +- fs/ext2/xattr.c | 8 ++++---- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 4a29d637608..7f8d2e5a7ea 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -570,7 +570,7 @@ do_more: error_return: brelse(bitmap_bh); release_blocks(sb, freed); - DQUOT_FREE_BLOCK(inode, freed); + vfs_dq_free_block(inode, freed); } /** @@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal, /* * Check quota for allocation of this block. */ - if (DQUOT_ALLOC_BLOCK(inode, num)) { + if (vfs_dq_alloc_block(inode, num)) { *errp = -EDQUOT; return 0; } @@ -1409,7 +1409,7 @@ allocated: *errp = 0; brelse(bitmap_bh); - DQUOT_FREE_BLOCK(inode, *count-num); + vfs_dq_free_block(inode, *count-num); *count = num; return ret_block; @@ -1420,7 +1420,7 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, *count); + vfs_dq_free_block(inode, *count); brelse(bitmap_bh); return 0; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 66321a877e7..15387c9c17d 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode) if (!is_bad_inode(inode)) { /* Quota is already initialized in iput() */ ext2_xattr_delete_inode(inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); } es = EXT2_SB(sb)->s_es; @@ -586,7 +586,7 @@ got: goto fail_drop; } - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -605,10 +605,10 @@ got: return inode; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 23fff2f8778..b43b9556366 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr) return error; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { - error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, iattr) ? 
-EDQUOT : 0; if (error) return error; } diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 987a5261cc2..7913531ec6d 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, ea_bdebug(new_bh, "reusing block"); error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) { + if (vfs_dq_alloc_block(inode, 1)) { unlock_buffer(new_bh); goto cleanup; } @@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, * as if nothing happened and cleanup the unused block */ if (error && error != -ENOSPC) { if (new_bh && new_bh != old_bh) - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; } } else @@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, le32_add_cpu(&HDR(old_bh)->h_refcount, -1); if (ce) mb_cache_entry_release(ce); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); mark_buffer_dirty(old_bh); ea_bdebug(old_bh, "refcount now=%d", le32_to_cpu(HDR(old_bh)->h_refcount)); @@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode) mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); } EXT2_I(inode)->i_file_acl = 0; -- cgit v1.2.3-70-g09d2 From 81a052273998f94b098945c4c313e05246956eb2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:58:01 +0100 Subject: ext3: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: linux-ext4@vger.kernel.org --- fs/ext3/balloc.c | 8 ++++---- fs/ext3/ialloc.c | 12 ++++++------ fs/ext3/inode.c | 6 +++--- fs/ext3/namei.c | 6 +++--- fs/ext3/super.c | 4 ++-- fs/ext3/xattr.c | 6 +++--- 6 files changed, 21 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 0dbf1c04847..225202db897 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode, } ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); if (dquot_freed_blocks) - DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); + vfs_dq_free_block(inode, dquot_freed_blocks); return; } @@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, /* * Check quota for allocation of this block. */ - if (DQUOT_ALLOC_BLOCK(inode, num)) { + if (vfs_dq_alloc_block(inode, num)) { *errp = -EDQUOT; return 0; } @@ -1714,7 +1714,7 @@ allocated: *errp = 0; brelse(bitmap_bh); - DQUOT_FREE_BLOCK(inode, *count-num); + vfs_dq_free_block(inode, *count-num); *count = num; return ret_block; @@ -1729,7 +1729,7 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, *count); + vfs_dq_free_block(inode, *count); brelse(bitmap_bh); return 0; } diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 8de6c720e51..dd13d60d524 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. 
*/ - DQUOT_INIT(inode); + vfs_dq_init(inode); ext3_xattr_delete_inode(handle, inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); is_directory = S_ISDIR(inode->i_mode); @@ -589,7 +589,7 @@ got: sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; ret = inode; - if(DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -620,10 +620,10 @@ really_out: return ret; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 5fa453b49a6..c8f9bd30882 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3055,7 +3055,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) error = PTR_ERR(handle); goto err_out; } - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { ext3_journal_stop(handle); return error; @@ -3146,7 +3146,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) ret = 2 * (bpp + indirects) + 2; #ifdef CONFIG_QUOTA - /* We know that structure was already allocated during DQUOT_INIT so + /* We know that structure was already allocated during vfs_dq_init so * we will be updating only the data blocks + inodes */ ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); #endif @@ -3237,7 +3237,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * - * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks + * Also, vfs_dq_alloc_space() will always dirty the inode when blocks * are allocated to the file. 
* * If the inode is marked synchronous, we don't honour that here - doing diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 4db4ffa1eda..e2fc63cbba8 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) - DQUOT_INIT(new_dentry->d_inode); + vfs_dq_init(new_dentry->d_inode); handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 41e6ae605e0..9e5b8e387e1 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1436,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, } list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %Ld bytes\n", @@ -2700,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_mutex) + * vfs_dq_init() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 175414ac221..83b7be849bd 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode, error = ext3_journal_dirty_metadata(handle, bh); if (IS_SYNC(inode)) handle->h_sync = 1; - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); if (ce) @@ -774,7 +774,7 @@ inserted: /* The old block is released after updating the inode. */ error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) + if (vfs_dq_alloc_block(inode, 1)) goto cleanup; error = ext3_journal_get_write_access(handle, new_bh); @@ -848,7 +848,7 @@ cleanup: return error; cleanup_dquot: - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; bad_block: -- cgit v1.2.3-70-g09d2 From a269eb18294d35874c53311acc2cd0b5ef477ce5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:04:39 +0100 Subject: ext4: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
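For illustration, a minimal sketch of the charge-then-rollback pattern the renamed helpers are used in throughout these hunks (my_fs_do_alloc() is a hypothetical stand-in for the filesystem's real block allocator): quota is charged with vfs_dq_alloc_block() before the on-disk allocation and handed back with vfs_dq_free_block() if the allocation fails.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Assumed to return 0 on success and a negative errno on failure. */
extern int my_fs_do_alloc(struct inode *inode, unsigned long count);

static int example_alloc_blocks(struct inode *inode, unsigned long count)
{
	int err;

	/* Charge the quota first; non-zero means the user is over quota. */
	if (vfs_dq_alloc_block(inode, count))
		return -EDQUOT;

	err = my_fs_do_alloc(inode, count);
	if (err) {
		/* Roll the quota charge back if no blocks were allocated. */
		vfs_dq_free_block(inode, count);
		return err;
	}
	return 0;
}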
Signed-off-by: Jan Kara Acked-by: Mingming Cao CC: linux-ext4@vger.kernel.org --- fs/ext4/balloc.c | 2 +- fs/ext4/ialloc.c | 12 ++++++------ fs/ext4/inode.c | 4 ++-- fs/ext4/mballoc.c | 4 ++-- fs/ext4/namei.c | 6 +++--- fs/ext4/super.c | 6 +++--- fs/ext4/xattr.c | 6 +++--- 7 files changed, 20 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index de9459b4cb9..38f40d55899 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, ext4_mb_free_blocks(handle, inode, block, count, metadata, &dquot_freed_blocks); if (dquot_freed_blocks) - DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); + vfs_dq_free_block(inode, dquot_freed_blocks); return; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 2d2b3585ee9..fb51b40e3e8 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. */ - DQUOT_INIT(inode); + vfs_dq_init(inode); ext4_xattr_delete_inode(handle, inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); is_directory = S_ISDIR(inode->i_mode); @@ -915,7 +915,7 @@ got: ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; ret = inode; - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -956,10 +956,10 @@ really_out: return ret; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 8290cfbd9fa..71d3ecd5db7 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4642,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) error = PTR_ERR(handle); goto err_out; } - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { ext4_journal_stop(handle); return error; @@ -5021,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * - * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks + * Also, vfs_dq_alloc_block() will always dirty the inode when blocks * are allocated to the file. 
* * If the inode is marked synchronous, we don't honour that here - doing diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4de42090c41..b038188bd03 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4587,7 +4587,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, return 0; } reserv_blks = ar->len; - while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { + while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } @@ -4663,7 +4663,7 @@ out2: kmem_cache_free(ext4_ac_cachep, ac); out1: if (inquota && ar->len < inquota) - DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); + vfs_dq_free_block(ar->inode, inquota - ar->len); out3: if (!ar->len) { if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index ba702bd7910..83410244d3e 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) - DQUOT_INIT(new_dentry->d_inode); + vfs_dq_init(new_dentry->d_inode); handle = ext4_journal_start(old_dir, 2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5a238e9c71c..f7371a6a923 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1804,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %lld bytes\n", @@ -3369,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) * is locked for write. Otherwise the are possible deadlocks: * Process 1 Process 2 * ext4_create() quota_sync() - * jbd2_journal_start() write_dquot() - * DQUOT_INIT() down(dqio_mutex) + * jbd2_journal_start() write_dquot() + * vfs_dq_init() down(dqio_mutex) * down(dqio_mutex) jbd2_journal_start() * */ diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 157ce6589c5..62b31c24699 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); if (ce) @@ -784,7 +784,7 @@ inserted: /* The old block is released after updating the inode. 
*/ error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) + if (vfs_dq_alloc_block(inode, 1)) goto cleanup; error = ext4_journal_get_write_access(handle, new_bh); @@ -860,7 +860,7 @@ cleanup: return error; cleanup_dquot: - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; bad_block: -- cgit v1.2.3-70-g09d2 From 77db4f25bca22ce96be0cd3c5a3160599817ff45 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:14:18 +0100 Subject: reiserfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: reiserfs-devel@vger.kernel.org --- fs/reiserfs/bitmap.c | 14 ++++++++------ fs/reiserfs/inode.c | 10 +++++----- fs/reiserfs/namei.c | 6 +++--- fs/reiserfs/stree.c | 14 +++++++------- fs/reiserfs/super.c | 2 +- 5 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 4646caa6045..f32d1425cc9 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th, journal_mark_dirty(th, s, sbh); if (for_unformatted) - DQUOT_FREE_BLOCK_NODIRTY(inode, 1); + vfs_dq_free_block_nodirty(inode, 1); } void reiserfs_free_block(struct reiserfs_transaction_handle *th, @@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start amount_needed, hint->inode->i_uid); #endif quota_ret = - DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed); + vfs_dq_alloc_block_nodirty(hint->inode, amount_needed); if (quota_ret) /* Quota exceeded? */ return QUOTA_EXCEEDED; if (hint->preallocate && hint->prealloc_size) { @@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start "reiserquota: allocating (prealloc) %d blocks id=%u", hint->prealloc_size, hint->inode->i_uid); #endif - quota_ret = - DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode, + quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode, hint->prealloc_size); if (quota_ret) hint->preallocate = hint->prealloc_size = 0; @@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start nr_allocated, hint->inode->i_uid); #endif - DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */ + /* Free not allocated blocks */ + vfs_dq_free_block_nodirty(hint->inode, + amount_needed + hint->prealloc_size - + nr_allocated); } while (nr_allocated--) reiserfs_free_block(hint->th, hint->inode, @@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start REISERFS_I(hint->inode)->i_prealloc_count, hint->inode->i_uid); #endif - DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + + vfs_dq_free_block_nodirty(hint->inode, amount_needed + hint->prealloc_size - nr_allocated - REISERFS_I(hint->inode)-> i_prealloc_count); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 55fce92cdf1..823227a7662 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode) * after delete_object so that quota updates go into the same transaction as * stat data deletion */ if (!err) - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); if (journal_end(&th, inode->i_sb, jbegin_count)) goto out; @@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto out_end_trans; } @@ -1947,12 
+1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, INODE_PKEY(inode)->k_objectid = 0; /* Quota change must be inside a transaction for journaling */ - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); out_end_trans: journal_end(th, th->t_super, th->t_blocks_allocated); /* Drop can be outside and it needs more credits so it's better to have it outside */ - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; make_bad_inode(inode); @@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) if (error) goto out; error = - DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { journal_end(&th, inode->i_sb, jbegin_count); diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 738967f6c8e..639d635d9d4 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th, */ static int drop_new_inode(struct inode *inode) { - DQUOT_DROP(inode); + vfs_dq_drop(inode); make_bad_inode(inode); inode->i_flags |= S_NOQUOTA; iput(inode); @@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode) } /* utility function that does setup for reiserfs_new_inode. -** DQUOT_INIT needs lots of credits so it's better to have it +** vfs_dq_init needs lots of credits so it's better to have it ** outside of a transaction, so we had to pull some bits of ** reiserfs_new_inode out into this func. */ @@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) } else { inode->i_gid = current_fsgid(); } - DQUOT_INIT(inode); + vfs_dq_init(inode); return 0; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index abbc64dcc8d..73aaa33f673 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath "reiserquota delete_item(): freeing %u, id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih)); #endif - DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); + vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes); /* Return deleted body length */ return n_ret_value; @@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, quota_cut_bytes, inode->i_uid, key2type(key)); #endif - DQUOT_FREE_SPACE_NODIRTY(inode, + vfs_dq_free_space_nodirty(inode, quota_cut_bytes); } break; @@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, "reiserquota cut_from_item(): freeing %u id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, '?'); #endif - DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); + vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes); return n_ret_value; } @@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree key2type(&(p_s_key->on_disk_key))); #endif - if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { + if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) { pathrelse(p_s_search_path); return -EDQUOT; } @@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree n_pasted_size, inode->i_uid, key2type(&(p_s_key->on_disk_key))); #endif - DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); + vfs_dq_free_space_nodirty(inode, n_pasted_size); return retval; } @@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath #endif /* We can't dirty inode here. 
It would be immediately written but * appropriate stat item isn't inserted yet... */ - if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) { + if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) { pathrelse(p_s_path); return -EDQUOT; } @@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath quota_bytes, inode->i_uid, head2type(p_s_ih)); #endif if (inode) - DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes); + vfs_dq_free_space_nodirty(inode, quota_bytes); return retval; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 317c0352163..5dbafb73940 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s) retval = remove_save_link_only(s, &save_link_key, 0); continue; } - DQUOT_INIT(inode); + vfs_dq_init(inode); if (truncate && S_ISDIR(inode->i_mode)) { /* We got a truncate request for a dir which is impossible. -- cgit v1.2.3-70-g09d2 From 5f5fa796c6b0158b50a2d5d2f80a926466ea87b3 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:15:30 +0100 Subject: ufs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: Evgeniy Dushistov --- fs/ufs/balloc.c | 12 ++++++------ fs/ufs/ialloc.c | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 0d9ada17373..54c16ec95df 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) "bit already cleared for fragment %u", i); } - DQUOT_FREE_BLOCK (inode, count); + vfs_dq_free_block(inode, count); fs32_add(sb, &ucg->cg_cs.cs_nffree, count); @@ -195,7 +195,7 @@ do_more: ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); - DQUOT_FREE_BLOCK(inode, uspi->s_fpb); + vfs_dq_free_block(inode, uspi->s_fpb); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); uspi->cs_total.cs_nbfree++; @@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); for (i = oldcount; i < newcount; i++) ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); - if(DQUOT_ALLOC_BLOCK(inode, count)) { + if (vfs_dq_alloc_block(inode, count)) { *err = -EDQUOT; return 0; } @@ -664,7 +664,7 @@ cg_found: for (i = count; i < uspi->s_fpb; i++) ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); i = uspi->s_fpb - count; - DQUOT_FREE_BLOCK(inode, i); + vfs_dq_free_block(inode, i); fs32_add(sb, &ucg->cg_cs.cs_nffree, i); uspi->cs_total.cs_nffree += i; @@ -676,7 +676,7 @@ cg_found: result = ufs_bitmap_search (sb, ucpi, goal, allocsize); if (result == INVBLOCK) return 0; - if(DQUOT_ALLOC_BLOCK(inode, count)) { + if (vfs_dq_alloc_block(inode, count)) { *err = -EDQUOT; return 0; } @@ -747,7 +747,7 @@ gotit: ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, -1); - if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { + if (vfs_dq_alloc_block(inode, uspi->s_fpb)) { *err = -EDQUOT; return INVBLOCK; } diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 6f5dcf00609..3527c00fef0 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode) is_directory = S_ISDIR(inode->i_mode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + 
vfs_dq_free_inode(inode); + vfs_dq_drop(inode); clear_inode (inode); @@ -355,8 +355,8 @@ cg_found: unlock_super (sb); - if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); + if (vfs_dq_alloc_inode(inode)) { + vfs_dq_drop(inode); err = -EDQUOT; goto fail_without_unlock; } -- cgit v1.2.3-70-g09d2 From bacfb7c2e5d10f40f7adb23aeeffc824b839eaa8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:20:46 +0100 Subject: udf: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara --- fs/udf/balloc.c | 14 +++++++------- fs/udf/ialloc.c | 8 ++++---- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 1b809bd494b..2bb788a2acb 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb, ((char *)bh->b_data)[(bit + i) >> 3]); } else { if (inode) - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); udf_add_free_space(sbi, sbi->s_partition, 1); } } @@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb, while (bit < (sb->s_blocksize << 3) && block_count > 0) { if (!udf_test_bit(bit, bh->b_data)) goto out; - else if (DQUOT_PREALLOC_BLOCK(inode, 1)) + else if (vfs_dq_prealloc_block(inode, 1)) goto out; else if (!udf_clear_bit(bit, bh->b_data)) { udf_debug("bit already cleared for block %d\n", bit); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto out; } block_count--; @@ -393,7 +393,7 @@ got_block: /* * Check quota for allocation of this block. */ - if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { + if (inode && vfs_dq_alloc_block(inode, 1)) { mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; @@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb, /* We do this up front - There are some error conditions that could occure, but.. oh well */ if (inode) - DQUOT_FREE_BLOCK(inode, count); + vfs_dq_free_block(inode, count); if (udf_add_free_space(sbi, sbi->s_partition, count)) mark_buffer_dirty(sbi->s_lvid_bh); @@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, epos.offset -= adsize; alloc_count = (elen >> sb->s_blocksize_bits); - if (inode && DQUOT_PREALLOC_BLOCK(inode, + if (inode && vfs_dq_prealloc_block(inode, alloc_count > block_count ? block_count : alloc_count)) alloc_count = 0; else if (alloc_count > block_count) { @@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb, goal_eloc.logicalBlockNum++; goal_elen -= sb->s_blocksize; - if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { + if (inode && vfs_dq_alloc_block(inode, 1)) { brelse(goal_epos.bh); mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 31fc84297dd..47dbe5613f9 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. 
*/ - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); clear_inode(inode); @@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) insert_inode_hash(inode); mark_inode_dirty(inode); - if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); + if (vfs_dq_alloc_inode(inode)) { + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); -- cgit v1.2.3-70-g09d2 From c94d2a22f26bdb11d3dd817669b940a8c76a8cad Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:22:32 +0100 Subject: jfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara Acked-by: Dave Kleikamp --- fs/jfs/acl.c | 2 +- fs/jfs/inode.c | 6 +++--- fs/jfs/jfs_dtree.c | 18 +++++++++--------- fs/jfs/jfs_extent.c | 10 +++++----- fs/jfs/jfs_inode.c | 4 ++-- fs/jfs/jfs_xtree.c | 14 +++++++------- fs/jfs/namei.c | 6 +++--- fs/jfs/xattr.c | 12 ++++++------ 8 files changed, 36 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index d3e5c33665d..a166c1669e8 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr) if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { - if (DQUOT_TRANSFER(inode, iattr)) + if (vfs_dq_transfer(inode, iattr)) return -EDQUOT; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b00ee9f05a0..b2ae190a77b 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode) /* * Free the inode from the quota allocation. */ - DQUOT_INIT(inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_init(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); } clear_inode(inode); diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 4dcc0581999..925871e9887 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) * It's time to move the inline table to an external * page and begin to build the xtree */ - if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage)) + if (vfs_dq_alloc_block(ip, sbi->nbperpage)) goto clean_up; if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { - DQUOT_FREE_BLOCK(ip, sbi->nbperpage); + vfs_dq_free_block(ip, sbi->nbperpage); goto clean_up; } @@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) memcpy(&jfs_ip->i_dirtable, temp_table, sizeof (temp_table)); dbFree(ip, xaddr, sbi->nbperpage); - DQUOT_FREE_BLOCK(ip, sbi->nbperpage); + vfs_dq_free_block(ip, sbi->nbperpage); goto clean_up; } ip->i_size = PSIZE; @@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid, n = xlen; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, n)) { + if (vfs_dq_alloc_block(ip, n)) { rc = -EDQUOT; goto extendOut; } @@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid, /* Rollback quota allocation */ if (rc && quota_allocation) - DQUOT_FREE_BLOCK(ip, quota_allocation); + vfs_dq_free_block(ip, quota_allocation); dtSplitUp_Exit: @@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split, return -EIO; /* Allocate blocks to quota. 
*/ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid, rp = rmp->data; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, xlen = lengthPXD(&fp->header.self); /* Free quota allocation. */ - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); /* free/invalidate its buffer page */ discard_metapage(fmp); @@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, xlen = lengthPXD(&p->header.self); /* Free quota allocation */ - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); /* free/invalidate its buffer page */ discard_metapage(mp); diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 7ae1e3281de..169802ea07f 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c @@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) } /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { + if (vfs_dq_alloc_block(ip, nxlen)) { dbFree(ip, nxaddr, (s64) nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return -EDQUOT; @@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) */ if (rc) { dbFree(ip, nxaddr, nxlen); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return (rc); } @@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) goto exit; /* Allocat blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { + if (vfs_dq_alloc_block(ip, nxlen)) { dbFree(ip, nxaddr, (s64) nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return -EDQUOT; @@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) /* extend the extent */ if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { dbFree(ip, xaddr + xlen, delta); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); goto exit; } } else { @@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) */ if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { dbFree(ip, nxaddr, nxlen); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); goto exit; } } diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index d4d142c2edd..dc0e02159ac 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) /* * Allocate inode to quota. 
*/ - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { rc = -EDQUOT; goto fail_drop; } @@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) return inode; fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; fail_unlock: inode->i_nlink = 0; diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c index ae3acafb447..a27e26c9056 100644 --- a/fs/jfs/jfs_xtree.c +++ b/fs/jfs/jfs_xtree.c @@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */ hint = addressXAD(xad) + lengthXAD(xad) - 1; } else hint = 0; - if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen))) + if ((rc = vfs_dq_alloc_block(ip, xlen))) goto out; if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); goto out; } } @@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */ /* undo data extent allocation */ if (*xaddrp == 0) { dbFree(ip, xaddr, (s64) xlen); - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); } return rc; } @@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip, rbn = addressPXD(pxd); /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { rc = -EDQUOT; goto clean_up; } @@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip, /* Rollback quota allocation. */ if (quota_allocation) - DQUOT_FREE_BLOCK(ip, quota_allocation); + vfs_dq_free_block(ip, quota_allocation); return (rc); } @@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid, return -EIO; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag) ip->i_size = newsize; /* update quota allocation to reflect freed blocks */ - DQUOT_FREE_BLOCK(ip, nfreed); + vfs_dq_free_block(ip, nfreed); /* * free tlock of invalidated pages diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index b4de56b851e..9feaa04555d 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ - DQUOT_INIT(ip); + vfs_dq_init(ip); /* directory must be empty to be removed */ if (!dtEmpty(ip)) { @@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ - DQUOT_INIT(ip); + vfs_dq_init(ip); if ((rc = get_UCSname(&dname, dentry))) goto out; @@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, } else if (new_ip) { IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); /* Init inode for quota operations. */ - DQUOT_INIT(new_ip); + vfs_dq_init(new_ip); } /* diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 9b7f2cdaae0..61dfa8173eb 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; /* Allocate new blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, nblocks)) { + if (vfs_dq_alloc_block(ip, nblocks)) { return -EDQUOT; } rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); if (rc) { /*Rollback quota allocation. 
*/ - DQUOT_FREE_BLOCK(ip, nblocks); + vfs_dq_free_block(ip, nblocks); return rc; } @@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, failed: /* Rollback quota allocation. */ - DQUOT_FREE_BLOCK(ip, nblocks); + vfs_dq_free_block(ip, nblocks); dbFree(ip, blkno, nblocks); return rc; @@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) if (blocks_needed > current_blocks) { /* Allocate new blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(inode, blocks_needed)) + if (vfs_dq_alloc_block(inode, blocks_needed)) return -EDQUOT; quota_allocation = blocks_needed; @@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) clean_up: /* Rollback quota allocation */ if (quota_allocation) - DQUOT_FREE_BLOCK(inode, quota_allocation); + vfs_dq_free_block(inode, quota_allocation); return (rc); } @@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf, /* If old blocks exist, they must be removed from quota allocation. */ if (old_blocks) - DQUOT_FREE_BLOCK(inode, old_blocks); + vfs_dq_free_block(inode, old_blocks); inode->i_ctime = CURRENT_TIME; -- cgit v1.2.3-70-g09d2 From 90c0af05a5a486f7709195e20bb26ad8f67ba5af Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 9 Feb 2009 22:22:21 +0100 Subject: nfsd: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. CC: bfields@fieldses.org CC: neilb@suse.de Signed-off-by: Jan Kara --- fs/nfsd/vfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6e50aaa56ca..ad38fc9e581 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, put_write_access(inode); goto out_nfserr; } - DQUOT_INIT(inode); + vfs_dq_init(inode); } /* sanitize the mode change */ @@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, else flags = O_WRONLY|O_LARGEFILE; - DQUOT_INIT(inode); + vfs_dq_init(inode); } *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), flags, cred); -- cgit v1.2.3-70-g09d2 From 7a2435d874388271cfe5046d180751352a1d30a2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 27 Jan 2009 01:47:11 +0100 Subject: quota: Remove superfluous inlines Remove inlines of large functions to decrease code size (saved 1543 bytes). 
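To make the cleanup concrete, the sketch below uses a hypothetical helper invented for illustration (it is not one of the quota functions); it shows the pattern applied throughout dquot.c, quota.c and quota_tree.c: a multi-statement static helper loses its "inline" keyword, so the compiler is free to emit the body once and call it rather than expand it at every call site.

/* Before the cleanup this would have been declared
 * "static inline int count_used_slots(...)"; dropping the keyword keeps
 * the behaviour but usually shrinks the generated code once the helper
 * has several callers. */
static int count_used_slots(const int *slots, int nr)
{
	int i, used = 0;

	for (i = 0; i < nr; i++)
		if (slots[i] != 0)
			used++;
	return used;
}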
Signed-off-by: Jan Kara --- fs/quota/dquot.c | 13 +++++++------ fs/quota/quota.c | 2 +- fs/quota/quota_tree.c | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4881db32e56..4a37b8e32b4 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -243,7 +243,8 @@ static inline void remove_dquot_hash(struct dquot *dquot) hlist_del_init(&dquot->dq_hash); } -static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) +static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, + unsigned int id, int type) { struct hlist_node *node; struct dquot *dquot; @@ -935,7 +936,7 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_rsvspace -= number; } -static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) +static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curinodes >= number) @@ -947,7 +948,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) clear_bit(DQ_INODES_B, &dquot->dq_flags); } -static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) +static void dquot_decr_space(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curspace >= number) @@ -974,7 +975,7 @@ static int warning_issued(struct dquot *dquot, const int warntype) #ifdef CONFIG_PRINT_QUOTA_WARNING static int flag_print_warnings = 1; -static inline int need_print_warning(struct dquot *dquot) +static int need_print_warning(struct dquot *dquot) { if (!flag_print_warnings) return 0; @@ -1109,7 +1110,7 @@ err_out: * * Note that this function can sleep. 
*/ -static inline void flush_warnings(struct dquot * const *dquots, char *warntype) +static void flush_warnings(struct dquot *const *dquots, char *warntype) { int i; @@ -1125,7 +1126,7 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) } } -static inline char ignore_hardlimit(struct dquot *dquot) +static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index d76ada914f9..89541bcbe27 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -341,7 +341,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void * look up a superblock on which quota ops will be performed * - use the name of a block device to find the superblock thereon */ -static inline struct super_block *quotactl_block(const char __user *special) +static struct super_block *quotactl_block(const char __user *special) { #ifdef CONFIG_BLOCK struct block_device *bdev; diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 3a5a90c0896..48e35a48f1c 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -33,7 +33,7 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) } /* Number of entries in one blocks */ -static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) +static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) { return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) / info->dqi_entry_size; @@ -47,7 +47,7 @@ static char *getdqbuf(size_t size) return buf; } -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) +static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; @@ -56,7 +56,7 @@ static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *bu info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) +static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; -- cgit v1.2.3-70-g09d2 From 268157ba673e2a868c167211e39fcad4ada5fd1e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 27 Jan 2009 15:47:22 +0100 Subject: quota: Coding style fixes Wrap long lines, remove assignments from conditions, rewrite two overcomplicated for loops. 
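The last two rules are easiest to see in a small sketch; the names below (struct fmt, find_fmt_*, first_step) are made up for illustration and are not from fs/quota, but the transformations are the same ones the patch applies: an assignment is pulled out of the if-condition it was buried in, and a search loop with an empty ";" body is rewritten so the body is explicit, as is done for the lookup loops in quota_tree.c.

struct fmt {
	int id;
	struct fmt *next;
};

static int first_step(void) { return 0; }

/* Old style: the result of the call is assigned inside the condition,
 * and the search loop ends in an easily overlooked ";". */
static struct fmt *find_fmt_old(struct fmt *head, int id, int *err)
{
	struct fmt *p;

	if ((*err = first_step()))
		return NULL;
	for (p = head; p && p->id != id; p = p->next);
	return p;
}

/* New style: assign first, test second, and give the loop a body. */
static struct fmt *find_fmt_new(struct fmt *head, int id, int *err)
{
	struct fmt *p;

	*err = first_step();
	if (*err)
		return NULL;
	for (p = head; p; p = p->next) {
		if (p->id == id)
			break;
	}
	return p;
}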
Signed-off-by: Jan Kara --- fs/quota/dquot.c | 181 +++++++++++++++++++++++++++++++------------------- fs/quota/quota.c | 35 ++++++---- fs/quota/quota_tree.c | 42 +++++++----- fs/quota/quota_v1.c | 48 ++++++++----- fs/quota/quota_v2.c | 3 +- 5 files changed, 198 insertions(+), 111 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4a37b8e32b4..a1bd5eabbe5 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -156,7 +156,9 @@ void unregister_quota_format(struct quota_format_type *fmt) struct quota_format_type **actqf; spin_lock(&dq_list_lock); - for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + for (actqf = &quota_formats; *actqf && *actqf != fmt; + actqf = &(*actqf)->qf_next) + ; if (*actqf) *actqf = (*actqf)->qf_next; spin_unlock(&dq_list_lock); @@ -168,18 +170,25 @@ static struct quota_format_type *find_quota_format(int id) struct quota_format_type *actqf; spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; + actqf = actqf->qf_next) + ; if (!actqf || !try_module_get(actqf->qf_owner)) { int qm; spin_unlock(&dq_list_lock); - for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); - if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) + for (qm = 0; module_names[qm].qm_fmt_id && + module_names[qm].qm_fmt_id != id; qm++) + ; + if (!module_names[qm].qm_fmt_id || + request_module(module_names[qm].qm_mod_name)) return NULL; spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; + actqf = actqf->qf_next) + ; if (actqf && !try_module_get(actqf->qf_owner)) actqf = NULL; } @@ -234,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type) */ static inline void insert_dquot_hash(struct dquot *dquot) { - struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); + struct hlist_head *head; + head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); hlist_add_head(&dquot->dq_hash, head); } @@ -251,7 +261,8 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, hlist_for_each (node, dquot_hash+hashent) { dquot = hlist_entry(node, struct dquot, dq_hash); - if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) + if (dquot->dq_sb == sb && dquot->dq_id == id && + dquot->dq_type == type) return dquot; } return NULL; } @@ -351,8 +362,10 @@ int dquot_acquire(struct dquot *dquot) if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); /* Write the info if needed */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 = dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret < 0) goto out_iolock; if (ret2 < 0) { @@ -387,8 +400,10 @@ int dquot_commit(struct dquot *dquot) * => we have better not writing it */ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 =
dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret >= 0) ret = ret2; } @@ -414,8 +429,10 @@ int dquot_release(struct dquot *dquot) if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 = dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret >= 0) ret = ret2; } @@ -543,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dirty = &dqopt->info[cnt].dqi_dirty_list; while (!list_empty(dirty)) { - dquot = list_first_entry(dirty, struct dquot, dq_dirty); + dquot = list_first_entry(dirty, struct dquot, + dq_dirty); /* Dirty and inactive can be only bad dquot... */ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { clear_dquot_dirty(dquot); @@ -763,11 +781,12 @@ we_slept: dqstats.lookups++; spin_unlock(&dq_list_lock); } - /* Wait for dq_lock - after this we know that either dquot_release() is already - * finished or it will be canceled due to dq_count > 1 test */ + /* Wait for dq_lock - after this we know that either dquot_release() is + * already finished or it will be canceled due to dq_count > 1 test */ wait_on_dquot(dquot); - /* Read the dquot and instantiate it (everything done only if needed) */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { + /* Read the dquot / allocate space in quota file */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && + sb->dq_op->acquire_dquot(dquot) < 0) { dqput(dquot); dquot = NULL; goto out; @@ -828,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type) iput(old_inode); } -/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ +/* + * Return 0 if dqput() won't block. + * (note that 1 doesn't necessarily mean blocking) + */ static inline int dqput_blocks(struct dquot *dquot) { if (atomic_read(&dquot->dq_count) <= 1) @@ -836,8 +858,11 @@ static inline int dqput_blocks(struct dquot *dquot) return 0; } -/* Remove references to dquots from inode - add dquot to list for freeing if needed */ -/* We can't race with anybody because we hold dqptr_sem for writing... */ +/* + * Remove references to dquots from inode and add dquot to list for freeing + * if we have the last referece to dquot + * We can't race with anybody because we hold dqptr_sem for writing... + */ static int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) { @@ -851,7 +876,9 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); #endif spin_lock(&dq_list_lock); - list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ + /* As dquot must have currently users it can't be on + * the free list... 
*/ + list_add(&dquot->dq_free, tofree_head); spin_unlock(&dq_list_lock); return 1; } @@ -861,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, return 0; } -/* Free list of dquots - called from inode.c */ -/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ +/* + * Free list of dquots + * Dquots are removed from inodes and no new references can be got so we are + * the only ones holding reference + */ static void put_dquot_list(struct list_head *tofree_head) { struct list_head *act_head; struct dquot *dquot; act_head = tofree_head->next; - /* So now we have dquots on the list... Just free them */ while (act_head != tofree_head) { dquot = list_entry(act_head, struct dquot, dq_free); act_head = act_head->next; - list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ + /* Remove dquot from the list so we won't have problems... */ + list_del_init(&dquot->dq_free); dqput(dquot); } } @@ -1131,37 +1161,42 @@ static int ignore_hardlimit(struct dquot *dquot) struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; return capable(CAP_SYS_RESOURCE) && - (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || + !(info->dqi_flags & V1_DQF_RSQUASH)); } /* needs dq_data_lock */ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) { + qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; + *warntype = QUOTA_NL_NOWARN; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_ihardlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && + newinodes > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { *warntype = QUOTA_NL_IHARDWARN; return NO_QUOTA; } if (dquot->dq_dqb.dqb_isoftlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && - dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && + newinodes > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && + get_seconds() >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { *warntype = QUOTA_NL_ISOFTLONGWARN; return NO_QUOTA; } if (dquot->dq_dqb.dqb_isoftlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + newinodes > dquot->dq_dqb.dqb_isoftlimit && dquot->dq_dqb.dqb_itime == 0) { *warntype = QUOTA_NL_ISOFTWARN; - dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + dquot->dq_dqb.dqb_itime = get_seconds() + + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; } return QUOTA_OK; @@ -1171,9 +1206,10 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { qsize_t tspace; + struct super_block *sb = dquot->dq_sb; *warntype = QUOTA_NL_NOWARN; - if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; @@ -1190,7 +1226,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war if (dquot->dq_dqb.dqb_bsoftlimit && tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && + dquot->dq_dqb.dqb_btime && + get_seconds() >= dquot->dq_dqb.dqb_btime && 
!ignore_hardlimit(dquot)) { if (!prealloc) *warntype = QUOTA_NL_BSOFTLONGWARN; @@ -1202,7 +1239,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = QUOTA_NL_BSOFTWARN; - dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + dquot->dq_dqb.dqb_btime = get_seconds() + + sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace; } else /* @@ -1217,15 +1255,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war static int info_idq_free(struct dquot *dquot, qsize_t inodes) { + qsize_t newinodes; + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) return QUOTA_NL_NOWARN; - if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) + newinodes = dquot->dq_dqb.dqb_curinodes - inodes; + if (newinodes <= dquot->dq_dqb.dqb_isoftlimit) return QUOTA_NL_ISOFTBELOW; if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && - dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) + newinodes < dquot->dq_dqb.dqb_ihardlimit) return QUOTA_NL_IHARDBELOW; return QUOTA_NL_NOWARN; } @@ -1466,7 +1507,8 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!inode->i_dquot[cnt]) continue; - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) + if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) + == NO_QUOTA) goto warn_put_all; } @@ -1673,19 +1715,13 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) transfer_from[cnt] = NULL; transfer_to[cnt] = NULL; warntype_to[cnt] = QUOTA_NL_NOWARN; - switch (cnt) { - case USRQUOTA: - if (!chuid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); - break; - case GRPQUOTA: - if (!chgid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); - break; - } } + if (chuid) + transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, + USRQUOTA); + if (chgid) + transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid, + GRPQUOTA); down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); /* Now recheck reliably when holding dqptr_sem */ @@ -1881,8 +1917,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) drop_dquot_ref(sb, cnt); invalidate_dquots(sb, cnt); /* - * Now all dquots should be invalidated, all writes done so we should be only - * users of the info. No locks needed. + * Now all dquots should be invalidated, all writes done so we + * should be only users of the info. No locks needed. 
*/ if (info_dirty(&dqopt->info[cnt])) sb->dq_op->write_info(sb, cnt); @@ -1920,10 +1956,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_loaded(sb, cnt)) { - mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); + mutex_lock_nested(&toputinode[cnt]->i_mutex, + I_MUTEX_QUOTA); toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | S_NOATIME | S_NOQUOTA); - truncate_inode_pages(&toputinode[cnt]->i_data, 0); + truncate_inode_pages(&toputinode[cnt]->i_data, + 0); mutex_unlock(&toputinode[cnt]->i_mutex); mark_inode_dirty(toputinode[cnt]); } @@ -2013,7 +2051,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, * possible) Also nobody should write to the file - we use * special IO operations which ignore the immutable bit. */ down_write(&dqopt->dqptr_sem); - oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); + oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | + S_NOQUOTA); inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; up_write(&dqopt->dqptr_sem); sb->dq_op->drop(inode); @@ -2032,7 +2071,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, dqopt->info[type].dqi_fmt_id = format_id; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); mutex_lock(&dqopt->dqio_mutex); - if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { + error = dqopt->ops[type]->read_file_info(sb, type); + if (error < 0) { mutex_unlock(&dqopt->dqio_mutex); goto out_file_init; } @@ -2254,7 +2294,8 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_unlock(&dq_data_lock); } -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, + struct if_dqblk *di) { struct dquot *dquot; @@ -2318,22 +2359,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) } if (check_blim) { - if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { + if (!dm->dqb_bsoftlimit || + dm->dqb_curspace < dm->dqb_bsoftlimit) { dm->dqb_btime = 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + } else if (!(di->dqb_valid & QIF_BTIME)) + /* Set grace only if user hasn't provided his own... */ dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; } if (check_ilim) { - if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + if (!dm->dqb_isoftlimit || + dm->dqb_curinodes < dm->dqb_isoftlimit) { dm->dqb_itime = 0; clear_bit(DQ_INODES_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + } else if (!(di->dqb_valid & QIF_ITIME)) + /* Set grace only if user hasn't provided his own... 
*/ dm->dqb_itime = get_seconds() + dqi->dqi_igrace; } - if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || + dm->dqb_isoftlimit) clear_bit(DQ_FAKE_B, &dquot->dq_flags); else set_bit(DQ_FAKE_B, &dquot->dq_flags); @@ -2343,7 +2387,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) return 0; } -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, + struct if_dqblk *di) { struct dquot *dquot; int rc; @@ -2400,7 +2445,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) if (ii->dqi_valid & IIF_IGRACE) mi->dqi_igrace = ii->dqi_igrace; if (ii->dqi_valid & IIF_FLAGS) - mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | + (ii->dqi_flags & DQF_MASK); spin_unlock(&dq_data_lock); mark_info_dirty(sb, type); /* Force write to disk */ @@ -2559,7 +2605,8 @@ static int __init dquot_init(void) #ifdef CONFIG_QUOTA_NETLINK_INTERFACE if (genl_register_family(&quota_genl_family) != 0) - printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); + printk(KERN_ERR + "VFS: Failed to create quota netlink interface.\n"); #endif return 0; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 89541bcbe27..b7f5a468f07 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -20,7 +20,8 @@ #include /* Check validity of generic quotactl commands */ -static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { if (type >= MAXQUOTAS) return -EINVAL; @@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid case Q_SETINFO: case Q_SETQUOTA: case Q_GETQUOTA: - /* This is just informative test so we are satisfied without a lock */ + /* This is just an informative test so we are satisfied + * without the lock */ if (!sb_has_quota_active(sb, type)) return -ESRCH; } @@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid } /* Check validity of XFS Quota Manager commands */ -static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { if (type >= XQM_MAXQUOTAS) return -EINVAL; @@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i return 0; } -static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { int error; @@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; if (!sb_has_quota_active(sb, cnt)) continue; - mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, + I_MUTEX_QUOTA); truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); } @@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type) spin_lock(&sb_lock); restart: list_for_each_entry(sb, &super_blocks, s_list) { - /* This test just improves performance so it needn't be reliable... */ + /* This test just improves performance so it needn't be + * reliable...
*/ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && type != cnt) continue; if (!sb_has_quota_active(sb, cnt)) continue; if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && - list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) + list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) continue; break; } @@ -227,7 +233,8 @@ restart: } /* Copy parameters and call proper function */ -static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, + void __user *addr) { int ret; @@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_QUOTAON: { char *pathname; - if (IS_ERR(pathname = getname(addr))) + pathname = getname(addr); + if (IS_ERR(pathname)) return PTR_ERR(pathname); ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); putname(pathname); @@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_GETINFO: { struct if_dqinfo info; - if ((ret = sb->s_qcop->get_info(sb, type, &info))) + ret = sb->s_qcop->get_info(sb, type, &info); + if (ret) return ret; if (copy_to_user(addr, &info, sizeof(info))) return -EFAULT; @@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_GETQUOTA: { struct if_dqblk idq; - if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); + if (ret) return ret; if (copy_to_user(addr, &idq, sizeof(idq))) return -EFAULT; @@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_XGETQUOTA: { struct fs_disk_quota fdq; - if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); + if (ret) return ret; if (copy_to_user(addr, &fdq, sizeof(fdq))) return -EFAULT; diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 48e35a48f1c..f81f4bcfb17 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -43,7 +43,8 @@ static char *getdqbuf(size_t size) { char *buf = kmalloc(size, GFP_NOFS); if (!buf) - printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + printk(KERN_WARNING + "VFS: Not enough memory for quota buffers.\n"); return buf; } @@ -113,7 +114,8 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) } /* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, + uint blk) { char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; @@ -150,7 +152,9 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint bl dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); /* No matter whether write succeeds block is out of list */ if (write_blk(info, blk, buf) < 0) - printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + printk(KERN_ERR + "VFS: Can't write block (%u) with free entries.\n", + blk); return 0; out_buf: kfree(tmpbuf); @@ -158,7 +162,8 @@ out_buf: } /* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, + uint blk) { char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct 
qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; @@ -230,7 +235,8 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, return 0; } memset(buf, 0, info->dqi_usable_bs); - /* This is enough as block is already zeroed and entry list is empty... */ + /* This is enough as the block is already zeroed and the entry + * list is empty... */ info->dqi_free_entry = blk; mark_info_dirty(dquot->dq_sb, dquot->dq_type); } @@ -246,10 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, } le16_add_cpu(&dh->dqdh_entries, 1); /* Find free structure in block */ - for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size) - ; + ddquot = buf + sizeof(struct qt_disk_dqdbheader); + for (i = 0; i < qtree_dqstr_in_blk(info); i++) { + if (qtree_entry_unused(info, ddquot)) + break; + ddquot += info->dqi_entry_size; + } #ifdef __QUOTA_QT_PARANOIA if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " @@ -340,7 +348,8 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, } /* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... + * We don't have to be afraid of deadlocks as we never have quotas on quota + * files... */ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) { @@ -515,10 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); goto out_buf; } - for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size) - ; + ddquot = buf + sizeof(struct qt_disk_dqdbheader); + for (i = 0; i < qtree_dqstr_in_blk(info); i++) { + if (info->dqi_ops->is_id(ddquot, dquot)) + break; + ddquot += info->dqi_entry_size; + } if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: Quota for id %u referenced " "but not present.\n", dquot->dq_id); @@ -632,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot); * the only one operating on dquot (thanks to dq_lock) */ int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) { - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && + !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) return qtree_delete_dquot(info, dquot); return 0; } diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c index b4af1c69ad1..0edcf42b177 100644 --- a/fs/quota/quota_v1.c +++ b/fs/quota/quota_v1.c @@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot) /* Set structure to 0s in case read fails/is after end of file */ memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); - dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); - if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && - dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + if (dquot->dq_dqb.dqb_bhardlimit == 0 && + dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && + dquot->dq_dqb.dqb_isoftlimit == 0) set_bit(DQ_FAKE_B, &dquot->dq_flags); dqstats.reads++; @@ -81,13 +84,16 @@ 
static int v1_commit_dqblk(struct dquot *dquot) v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); if (dquot->dq_id == 0) { - dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; - dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + dqblk.dqb_btime = + sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = + sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; } ret = 0; if (sb_dqopt(dquot->dq_sb)->files[type]) - ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, + (char *)&dqblk, sizeof(struct v1_disk_dqblk), + v1_dqoff(dquot->dq_id)); if (ret != sizeof(struct v1_disk_dqblk)) { printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", dquot->dq_sb->s_id); @@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type) return 0; blocks = isize >> BLOCK_SIZE_BITS; off = isize & (BLOCK_SIZE - 1); - if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % + sizeof(struct v1_disk_dqblk)) return 0; - /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + /* Doublecheck whether we didn't get file with new format - with old + * quotactl() this could happen */ + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, + sizeof(struct v2_disk_dqheader), 0); if (size != sizeof(struct v2_disk_dqheader)) return 1; /* Probably not new format */ if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) return 1; /* Definitely not new format */ - printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); + printk(KERN_INFO + "VFS: %s: Refusing to turn on old quota format on given file." + " It probably contains newer quota format.\n", sb->s_id); return 0; /* Seems like a new format file -> refuse it */ } @@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type) struct v1_disk_dqblk dqblk; int ret; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret != sizeof(struct v1_disk_dqblk)) { if (ret >= 0) ret = -EIO; goto out; @@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type) /* limits are stored as unsigned 32-bit data */ dqopt->info[type].dqi_maxblimit = 0xffffffff; dqopt->info[type].dqi_maxilimit = 0xffffffff; - dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; - dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; + dqopt->info[type].dqi_igrace = + dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = + dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; out: return ret; } @@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type) int ret; dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret != sizeof(struct v1_disk_dqblk)) { if (ret >= 0) ret = -EIO; goto out; diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index b618b563635..a5475fb1ae4 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type) static const uint quota_magics[] = V2_INITQMAGICS; static const uint quota_versions[] = V2_INITQVERSIONS; - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, + sizeof(struct v2_disk_dqheader), 0); if (size != sizeof(struct v2_disk_dqheader)) { printk("quota_v2: failed read expected=%zd got=%zd\n", sizeof(struct v2_disk_dqheader), size); -- cgit v1.2.3-70-g09d2 From 620372a9ffc6f1239a4f379ff145de9e4b5d23ba Mon Sep 17 00:00:00 2001 From: Matt LaPlante Date: Tue, 10 Feb 2009 14:50:34 +0100 Subject: trivial: fix typos/grammar errors in fs/Kconfig Signed-off-by: Matt LaPlante Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina Signed-off-by: Jan Kara --- fs/quota/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index d9750d86fde..8047e01ef46 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -33,7 +33,7 @@ config PRINT_QUOTA_WARNING Note that this behavior is currently deprecated and may go away in future. Please use notification via netlink socket instead. -# Generic support for tree structured quota files. Seleted when needed. +# Generic support for tree structured quota files. Selected when needed. config QUOTA_TREE tristate -- cgit v1.2.3-70-g09d2 From c16831b4cc9b0805adf8ca3001752a7ec10a17bf Mon Sep 17 00:00:00 2001 From: Manish Katiyar Date: Thu, 12 Feb 2009 21:57:04 +0100 Subject: ext2: Zero our b_size in ext2_quota_read() ext2_quota_read() doesn't initialize tmp_bh.b_size before calling ext2_get_block() where we access it. Since it is a local variable it might contain some garbage. Make sure it is filled with reasonable value before passing. Signed-off-by: Manish Katiyar Signed-off-by: Jan Kara --- fs/ext2/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 7c6e3606f0e..f983225266d 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; + tmp_bh.b_size = sb->s_blocksize; err = ext2_get_block(inode, blk, &tmp_bh, 0); if (err < 0) return err; -- cgit v1.2.3-70-g09d2
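To see why that one-line ext2 fix matters, here is a condensed, hypothetical sketch of the pattern (it mirrors the shape of ext2_quota_read() in fs/ext2/super.c, which already pulls in the needed headers, but it is not the verbatim function): the on-stack buffer_head handed to the get_block callback is a plain local variable, so both b_state and b_size must be set by hand, because get_block implementations read bh->b_size to know how much they are allowed to map.

/* Read one block worth of quota data at offset "off" into "data".
 * Simplified sketch only; the error handling and multi-block loop of the
 * real ext2_quota_read() are omitted. */
static ssize_t example_quota_read_blk(struct super_block *sb,
				      struct inode *inode, char *data,
				      size_t len, loff_t off)
{
	sector_t blk = off >> sb->s_blocksize_bits;
	size_t offset = off & (sb->s_blocksize - 1);
	size_t tocopy = min_t(size_t, len, sb->s_blocksize - offset);
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	int err;

	tmp_bh.b_state = 0;
	tmp_bh.b_size = sb->s_blocksize;	/* the line the patch adds */
	err = ext2_get_block(inode, blk, &tmp_bh, 0);
	if (err < 0)
		return err;
	if (!buffer_mapped(&tmp_bh)) {
		/* A hole in the quota file reads back as zeroes. */
		memset(data, 0, tocopy);
		return tocopy;
	}
	bh = sb_bread(sb, tmp_bh.b_blocknr);
	if (!bh)
		return -EIO;
	memcpy(data, bh->b_data + offset, tocopy);
	brelse(bh);
	return tocopy;
}

Left uninitialised, b_size is whatever happens to be on the stack, so ext2_get_block() could be asked to map an arbitrary length; initialising it to one block size keeps the request bounded to the block actually being read.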